[llvm] r233199 - [X86, AVX] improve insertion into zero element of 256-bit vector
Sanjay Patel
spatel at rotateright.com
Wed Mar 25 10:36:02 PDT 2015
Author: spatel
Date: Wed Mar 25 12:36:01 2015
New Revision: 233199
URL: http://llvm.org/viewvc/llvm-project?rev=233199&view=rev
Log:
[X86, AVX] improve insertion into zero element of 256-bit vector
This patch allows AVX blend instructions to handle insertion into the low
element of a 256-bit vector for the appropriate data types.
For f32, instead of:
vblendps $1, %xmm1, %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[1,2,3]
vblendps $15, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
we get:
vblendps $1, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
For f64, instead of:
vmovsd %xmm1, %xmm0, %xmm1 ## xmm1 = xmm1[0],xmm0[1]
vblendpd $3, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0,1],ymm0[2,3]
we get:
vblendpd $1, %ymm1, %ymm0, %ymm0 ## ymm0 = ymm1[0],ymm0[1,2,3]
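For reference, a minimal C example (hypothetical source, not part of this commit) that produces the insertelement-into-element-0 pattern handled here. Built with clang -O2 -mavx, the store into lane 0 uses clang's vector-subscript extension and should now lower to the single ymm vblendps shown above:

#include <immintrin.h>

// Insert a scalar into element 0 of a 256-bit float vector.
__m256 insert_low_f32(__m256 y, float f) {
  y[0] = f;   // becomes: insertelement <8 x float> %y, float %f, i32 0
  return y;
}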
For the hardware-neglected integer data types, I left a TODO comment in the
code and added regression tests for a follow-on patch.
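Continuing the example above (same immintrin.h include), the idea in that TODO, sketched with intrinsics, is to bitcast the integer vector into the FP domain, blend there, and bitcast back. This is illustrative only; the function name is made up and the follow-on patch may take a different form:

// Hypothetical sketch of the cast-to-FP-and-blend idea for i32 without AVX2.
__m256i insert_low_i32_via_fp(__m256i y, int i) {
  __m256 yf = _mm256_castsi256_ps(y);                       // reinterpret as <8 x float>
  __m128i v = _mm_cvtsi32_si128(i);                         // scalar -> low element of an xmm
  __m256 vf = _mm256_castsi256_ps(_mm256_castsi128_si256(v));
  return _mm256_castps_si256(_mm256_blend_ps(yf, vf, 1));   // vblendps $1 in the FP domain
}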
Differential Revision: http://reviews.llvm.org/D8609
Added:
llvm/trunk/test/CodeGen/X86/avx-insertelt.ll
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=233199&r1=233198&r2=233199&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Mar 25 12:36:01 2015
@@ -10551,6 +10551,20 @@ SDValue X86TargetLowering::LowerINSERT_V
// If the vector is wider than 128 bits, extract the 128-bit subvector, insert
// into that, and then insert the subvector back into the result.
if (VT.is256BitVector() || VT.is512BitVector()) {
+ // With a 256-bit vector, we can insert into the zero element efficiently
+ // using a blend if we have AVX or AVX2 and the right data type.
+ if (VT.is256BitVector() && IdxVal == 0) {
+ // TODO: It is worthwhile to cast integer to floating point and back
+ // and incur a domain crossing penalty if that's what we'll end up
+ // doing anyway after extracting to a 128-bit vector.
+ if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
+ (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
+ SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
+ N2 = DAG.getIntPtrConstant(1);
+ return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
+ }
+ }
+
// Get the desired 128-bit vector chunk.
SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
Added: llvm/trunk/test/CodeGen/X86/avx-insertelt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-insertelt.ll?rev=233199&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-insertelt.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx-insertelt.ll Wed Mar 25 12:36:01 2015
@@ -0,0 +1,83 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+
+define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
+; ALL-LABEL: insert_f32:
+; ALL: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; ALL-NEXT: retq
+ %i0 = insertelement <8 x float> %y, float %f, i32 0
+ ret <8 x float> %i0
+}
+
+define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
+; ALL-LABEL: insert_f64:
+; ALL: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; ALL-NEXT: retq
+ %i0 = insertelement <4 x double> %y, double %f, i32 0
+ ret <4 x double> %i0
+}
+
+define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+; AVX-LABEL: insert_i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: insert_i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: retq
+ %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
+ ret <32 x i8> %i0
+}
+
+define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+; AVX-LABEL: insert_i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: insert_i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: retq
+ %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
+ ret <16 x i16> %i0
+}
+
+define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+; AVX-LABEL: insert_i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpinsrd $0, %edi, %xmm0, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: insert_i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vmovd %edi, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: retq
+ %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
+ ret <8 x i32> %i0
+}
+
+define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+; AVX-LABEL: insert_i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: retq
+;
+; AVX2-LABEL: insert_i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: retq
+ %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
+ ret <4 x i64> %i0
+}
+