[llvm] r345520 - [X86][SSE] getFauxShuffleMask - Fix shuffle mask adjustment for multiple inserted subvectors

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 29 11:25:48 PDT 2018


Author: rksimon
Date: Mon Oct 29 11:25:48 2018
New Revision: 345520

URL: http://llvm.org/viewvc/llvm-project?rev=345520&view=rev
Log:
[X86][SSE] getFauxShuffleMask - Fix shuffle mask adjustment for multiple inserted subvectors

Part of the issue discovered in PR39483, although it's not fully exposed until I reapply rL345395 (by reverting rL345451)
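
For illustration, with hypothetical values NumElts = 8 and NumSubElts = 4: a sub-mask element M = 5 refers to lane 1 of the second extracted sub-input (InputIdx = 1), so with ExtractIdx = 2 the adjusted index should be 8*(1+1) + 2 + (5 % 4) = 19. The previous code added the whole of M, giving 8*(1+1) + 2 + 5 = 23; the InputIdx * NumSubElts component of M was counted on top of the per-input base, pushing the index past the slot reserved for that sub-input. Only the lane offset within the subvector (M % NumSubElts) should be added.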

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=345520&r1=345519&r2=345520&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Oct 29 11:25:48 2018
@@ -6379,13 +6379,12 @@ static bool getFauxShuffleMask(SDValue N
       Mask.push_back(i);
     for (int i = 0; i != (int)NumSubElts; ++i) {
       int M = SubMask[i];
-      if (M < 0) {
-        Mask[i + InsertIdx] = M;
-      } else {
+      if (0 <= M) {
         int InputIdx = M / NumSubElts;
         int ExtractIdx = SubInputs[InputIdx].getConstantOperandVal(1);
-        Mask[i + InsertIdx] = (NumElts * (1 + InputIdx)) + ExtractIdx + M;
+        M = (NumElts * (1 + InputIdx)) + ExtractIdx + (M % NumSubElts);
       }
+      Mask[i + InsertIdx] = M;
     }
     // TODO - Add support for more than 1 subinput.
     return Ops.size() <= 2;

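To make the index math in the hunk above concrete, here is a standalone sketch (plain C++, not LLVM code; NumElts, NumSubElts, InsertIdx, the sub-mask values and the extract offsets are all made up for illustration) of how each defined sub-mask element is remapped to NumElts * (1 + InputIdx) + ExtractIdx + (M % NumSubElts):

  #include <cstdio>
  #include <vector>

  int main() {
    const int NumElts = 8;     // elements in the destination vector
    const int NumSubElts = 4;  // elements in the inserted subvector
    const int InsertIdx = 4;   // subvector inserted at destination element 4
    // Sub-mask referencing two concatenated sub-inputs
    // (0..3 -> sub-input 0, 4..7 -> sub-input 1); -1 marks an undef lane.
    const int SubMask[NumSubElts] = {1, 5, -1, 6};
    // Hypothetical extract offsets of each sub-input from its source vector.
    const int ExtractIdxOf[2] = {0, 2};

    std::vector<int> Mask(NumElts);
    for (int i = 0; i != NumElts; ++i)
      Mask[i] = i; // identity for lanes outside the inserted subvector
    for (int i = 0; i != NumSubElts; ++i) {
      int M = SubMask[i];
      if (0 <= M) {
        int InputIdx = M / NumSubElts;           // which sub-input M points at
        int ExtractIdx = ExtractIdxOf[InputIdx]; // its extract offset
        // Corrected adjustment: only the lane within the subvector
        // (M % NumSubElts) is added. The old code added the whole M,
        // over-counting by InputIdx * NumSubElts for every sub-input
        // after the first.
        M = (NumElts * (1 + InputIdx)) + ExtractIdx + (M % NumSubElts);
      }
      Mask[i + InsertIdx] = M;
    }
    for (int i = 0; i != NumElts; ++i)
      printf("%d ", Mask[i]);
    printf("\n"); // prints: 0 1 2 3 9 19 -1 20 with the values above
    return 0;
  }
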
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll?rev=345520&r1=345519&r2=345520&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll Mon Oct 29 11:25:48 2018
@@ -435,3 +435,100 @@ define <8 x float> @constant_fold_vpermi
   %1 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>, <8 x i32> <i32 4, i32 0, i32 2, i32 1, i32 0, i32 1, i32 1, i32 1>)
   ret <8 x float> %1
 }
+
+define void @PR39483() {
+; X32-AVX1-LABEL: PR39483:
+; X32-AVX1:       # %bb.0: # %entry
+; X32-AVX1-NEXT:    vmovups 32, %ymm0
+; X32-AVX1-NEXT:    vmovups 64, %ymm1
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; X32-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,3]
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; X32-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X32-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
+; X32-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; X32-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; X32-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X32-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X32-AVX1-NEXT:    vmovups %ymm0, (%eax)
+;
+; X32-AVX2-LABEL: PR39483:
+; X32-AVX2:       # %bb.0: # %entry
+; X32-AVX2-NEXT:    vmovups 32, %ymm0
+; X32-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X32-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
+; X32-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; X32-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
+; X32-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
+; X32-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X32-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vmovups %ymm0, (%eax)
+;
+; X32-AVX512-LABEL: PR39483:
+; X32-AVX512:       # %bb.0: # %entry
+; X32-AVX512-NEXT:    vmovups 0, %zmm0
+; X32-AVX512-NEXT:    vmovups 64, %ymm1
+; X32-AVX512-NEXT:    vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
+; X32-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
+; X32-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
+; X32-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; X32-AVX512-NEXT:    vmovups %ymm0, (%eax)
+;
+; X64-AVX1-LABEL: PR39483:
+; X64-AVX1:       # %bb.0: # %entry
+; X64-AVX1-NEXT:    vmovups 32, %ymm0
+; X64-AVX1-NEXT:    vmovups 64, %ymm1
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; X64-AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0,3]
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X64-AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3]
+; X64-AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,1,0,3]
+; X64-AVX1-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X64-AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X64-AVX1-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X64-AVX1-NEXT:    vmovups %ymm0, (%rax)
+;
+; X64-AVX2-LABEL: PR39483:
+; X64-AVX2:       # %bb.0: # %entry
+; X64-AVX2-NEXT:    vmovups 32, %ymm0
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; X64-AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,5,0,3,6,u,u,u>
+; X64-AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; X64-AVX2-NEXT:    vpermilps {{.*#+}} ymm1 = mem[0,1,0,3,4,5,4,7]
+; X64-AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
+; X64-AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; X64-AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vmulps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vaddps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vmovups %ymm0, (%rax)
+;
+; X64-AVX512-LABEL: PR39483:
+; X64-AVX512:       # %bb.0: # %entry
+; X64-AVX512-NEXT:    vmovups 0, %zmm0
+; X64-AVX512-NEXT:    vmovups 64, %ymm1
+; X64-AVX512-NEXT:    vmovaps {{.*#+}} zmm2 = <2,5,8,11,14,17,20,23,u,u,u,u,u,u,u,u>
+; X64-AVX512-NEXT:    vpermi2ps %zmm1, %zmm0, %zmm2
+; X64-AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vmulps %ymm0, %ymm2, %ymm1
+; X64-AVX512-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; X64-AVX512-NEXT:    vmovups %ymm0, (%rax)
+entry:
+  %wide.vec = load <24 x float>, <24 x float>* null, align 4
+  %strided.vec18 = shufflevector <24 x float> %wide.vec, <24 x float> undef, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 14, i32 17, i32 20, i32 23>
+  %0 = fmul <8 x float> %strided.vec18, zeroinitializer
+  %1 = fadd <8 x float> zeroinitializer, %0
+  store <8 x float> %1, <8 x float>* undef, align 16
+  unreachable
+}

More information about the llvm-commits mailing list