[llvm] r218522 - [x86] Fix a large collection of bugs that crept in as I fleshed out the

Chandler Carruth chandlerc at gmail.com
Fri Sep 26 10:11:03 PDT 2014


Author: chandlerc
Date: Fri Sep 26 12:11:02 2014
New Revision: 218522

URL: http://llvm.org/viewvc/llvm-project?rev=218522&view=rev
Log:
[x86] Fix a large collection of bugs that crept in as I fleshed out the
AVX support.

New test cases included. Note that none of the existing test cases
covered these buggy code paths. =/ Also, it is clear from this that
SHUFPS and SHUFPD are the most bug prone shuffle instructions in x86. =[

These were all detected by fuzz-testing. (I <3 fuzz testing.)
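
For context on the v4f64 SHUFPD hunks below: VSHUFPD only draws result
element 0 from V1's low 128-bit lane, element 1 from V2's low lane, element 2
from V1's high lane, and element 3 from V2's high lane. The old bounds checks
(e.g. Mask[0] < 4) also accepted indices from the wrong lane, which the
instruction cannot produce. Here is a minimal standalone sketch of the
tightened check and immediate encoding; the name fitsSHUFPD is made up for
illustration and is not an LLVM API:

  // Illustrative only, not part of the patch. Mask uses the usual
  // numbering: 0-3 = V1, 4-7 = V2, -1 = undef.
  static bool fitsSHUFPD(const int Mask[4], unsigned &Imm) {
    if ((Mask[0] != -1 && !(Mask[0] >= 0 && Mask[0] < 2)) ||  // V1 low lane
        (Mask[1] != -1 && !(Mask[1] >= 4 && Mask[1] < 6)) ||  // V2 low lane
        (Mask[2] != -1 && !(Mask[2] >= 2 && Mask[2] < 4)) ||  // V1 high lane
        (Mask[3] != -1 && !(Mask[3] >= 6 && Mask[3] < 8)))    // V2 high lane
      return false;
    // Each immediate bit picks the odd (high) double of its lane.
    Imm = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
          ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
    return true;
  }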

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=218522&r1=218521&r2=218522&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Sep 26 12:11:02 2014
@@ -7986,9 +7986,17 @@ static SDValue lowerVectorShuffleWithSHU
   } else if (NumV2Elements == 2) {
     if (Mask[0] < 4 && Mask[1] < 4) {
       // Handle the easy case where we have V1 in the low lanes and V2 in the
-      // high lanes. We never see this reversed because we sort the shuffle.
+      // high lanes.
       NewMask[2] -= 4;
       NewMask[3] -= 4;
+    } else if (Mask[2] < 4 && Mask[3] < 4) {
+      // We also handle the reversed case because this utility may get called
+      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
+      // arrange things in the right direction.
+      NewMask[0] -= 4;
+      NewMask[1] -= 4;
+      HighV = V1;
+      LowV = V2;
     } else {
       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
       // trying to place elements directly, just blend them and set up the final
@@ -9392,7 +9400,7 @@ static SDValue lowerVectorShuffleAsLaneP
     SmallVector<int, 32> FlippedBlendMask;
     for (int i = 0, Size = Mask.size(); i < Size; ++i)
       FlippedBlendMask.push_back(
-          Mask[i] < 0 ? -1 : ((Mask[i] / LaneSize == i / LaneSize)
+          Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                   ? Mask[i]
                                   : Mask[i] % LaneSize +
                                         (i / LaneSize) * LaneSize + Size));
@@ -9469,15 +9477,19 @@ static SDValue lowerV4F64VectorShuffle(S
     return Blend;
 
   // Check if the blend happens to exactly fit that of SHUFPD.
-  if (Mask[0] < 4 && (Mask[1] == -1 || Mask[1] >= 4) &&
-      Mask[2] < 4 && (Mask[3] == -1 || Mask[3] >= 4)) {
+  if ((Mask[0] == -1 || Mask[0] < 2) &&
+      (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
+      (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
+      (Mask[3] == -1 || Mask[3] >= 6)) {
     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
                           ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
                        DAG.getConstant(SHUFPDMask, MVT::i8));
   }
-  if ((Mask[0] == -1 || Mask[0] >= 4) && Mask[1] < 4 &&
-      (Mask[2] == -1 || Mask[2] >= 4) && Mask[3] < 4) {
+  if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
+      (Mask[1] == -1 || Mask[1] < 2) &&
+      (Mask[2] == -1 || Mask[2] >= 6) &&
+      (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
     unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
                           ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
@@ -9564,8 +9576,10 @@ static SDValue lowerV8F32VectorShuffle(S
 
   // If the shuffle mask is repeated in each 128-bit lane, we have many more
   // options to efficiently lower the shuffle.
-  SmallVector<int, 2> RepeatedMask;
+  SmallVector<int, 4> RepeatedMask;
   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
+    assert(RepeatedMask.size() == 4 &&
+           "Repeated masks must be half the mask width!");
     if (isSingleInputShuffleMask(Mask))
       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                          getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
@@ -9577,12 +9591,12 @@ static SDValue lowerV8F32VectorShuffle(S
       return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
 
     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
-    // have already handled any direct blends.
-    int SHUFPSMask[] = {Mask[0], Mask[1], Mask[2], Mask[3]};
-    for (int &M : SHUFPSMask)
-      if (M >= 8)
-        M -= 4;
-    return lowerVectorShuffleWithSHUPFS(DL, MVT::v8f32, SHUFPSMask, V1, V2, DAG);
+    // have already handled any direct blends. We also need to squash the
+    // repeated mask into a simulated v4f32 mask.
+    for (int i = 0; i < 4; ++i)
+      if (RepeatedMask[i] >= 8)
+        RepeatedMask[i] -= 4;
+    return lowerVectorShuffleWithSHUPFS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
   }
 
   // If we have a single input shuffle with different shuffle patterns in the
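
A note on the lane-flip hunk above: the important change is taking
Mask[i] % Size before asking which 128-bit lane a mask entry lives in.
Entries that refer to the second input are offset by Size, so dividing them
by LaneSize directly never matches i / LaneSize. For example, with Size = 8
and LaneSize = 4, a mask value of 10 (element 2 of the second input, which
sits in its low lane) used to compute lane 10 / 4 = 2 and was always treated
as crossing lanes; with the fix, (10 % 8) / 4 = 0 as intended. A rough
standalone rendering of the corrected loop, with a hypothetical name and
plain std::vector instead of LLVM types:

  #include <vector>

  // Sketch only. Any element that is not already in the right 128-bit lane
  // is redirected to a lane-flipped operand appended at offset Size.
  std::vector<int> buildFlippedBlendMask(const std::vector<int> &Mask,
                                         int LaneSize) {
    int Size = (int)Mask.size();
    std::vector<int> Flipped;
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0) {
        Flipped.push_back(-1);
        continue;
      }
      // Normalize indices into [0, Size) before computing the source lane.
      bool SameLane = (Mask[i] % Size) / LaneSize == i / LaneSize;
      Flipped.push_back(SameLane ? Mask[i]
                                 : Mask[i] % LaneSize +
                                       (i / LaneSize) * LaneSize + Size);
    }
    return Flipped;
  }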

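Likewise, the v8f32 hunk now hands the SHUFPS fallback the repeated per-lane
mask rather than the raw 8-wide mask, after squashing second-input entries
(which sit at 8 and above in the repeated mask) down into the 4..7 slots a
4-element two-input mask uses. As a tiny illustration (hypothetical helper
name, not part of the patch):

  // Sketch only: map V2 entries of a repeated per-lane mask from 8..11
  // into 4..7 so a v4-style shuffle helper can consume it.
  static void squashRepeatedMask(int RepeatedMask[4]) {
    for (int i = 0; i < 4; ++i)
      if (RepeatedMask[i] >= 8)
        RepeatedMask[i] -= 4;
  }
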
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=218522&r1=218521&r2=218522&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Fri Sep 26 12:11:02 2014
@@ -566,6 +566,46 @@ define <4 x i64> @shuffle_v4i64_4015(<4
   ret <4 x i64> %shuffle
 }
 
+define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_2u35
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpermilpd {{.*}} # xmm2 = xmm0[1,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0,1,2],ymm1[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v4i64_2u35
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpermq {{.*}} # ymm1 = ymm1[0,1,2,1]
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[2,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 2, i32 undef, i32 3, i32 5>
+  ret <4 x i64> %shuffle
+}
+
+define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
+; AVX1-LABEL: @shuffle_v4i64_1251
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufpd {{.*}} # ymm0 = ymm0[1],ymm2[0],ymm0[2],ymm2[3]
+; AVX1-NEXT:    vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vblendpd {{.*}} # ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v4i64_1251
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpermq {{.*}} # ymm1 = ymm1[0,1,1,3]
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[1,2,2,1]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 2, i32 5, i32 1>
+  ret <4 x i64> %shuffle
+}
+
 define <4 x i64> @stress_test1(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: @stress_test1
 ; AVX1:       # BB#0:

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll?rev=218522&r1=218521&r2=218522&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll Fri Sep 26 12:11:02 2014
@@ -646,6 +646,29 @@ define <8 x float> @shuffle_v8f32_uuu3uu
   ret <8 x float> %shuffle
 }
 
+define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
+; AVX1-LABEL: @shuffle_v8f32_c348cda0
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*}} # ymm0 = ymm0[0,3],ymm2[0,0],ymm0[4,7],ymm2[4,4]
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vpermilps {{.*}} # ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX1-NEXT:    vblendps {{.*}} # ymm1 = ymm2[0],ymm1[1,2,3,4,5],ymm2[6],ymm1[7]
+; AVX1-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v8f32_c348cda0
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovaps {{.*}} # ymm2 = <u,3,4,u,u,u,u,0>
+; AVX2-NEXT:    vpermps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vmovaps {{.*}} # ymm2 = <4,u,u,0,4,5,2,u>
+; AVX2-NEXT:    vpermps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2],ymm1[3,4,5,6],ymm0[7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 12, i32 3, i32 4, i32 8, i32 12, i32 13, i32 10, i32 0>
+  ret <8 x float> %shuffle
+}
+
 define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
 ; AVX1-LABEL: @shuffle_v8i32_00000000
 ; AVX1:       # BB#0:
@@ -1514,3 +1537,26 @@ define <8 x i32> @shuffle_v8i32_uuu3uu66
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 3, i32 undef, i32 undef, i32 6, i32 6>
   ret <8 x i32> %shuffle
 }
+
+define <8 x i32> @shuffle_v8i32_6caa87e5(<8 x i32> %a, <8 x i32> %b) {
+; AVX1-LABEL: @shuffle_v8i32_6caa87e5
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vperm2f128 {{.*}} # ymm2 = ymm1[2,3,0,1]
+; AVX1-NEXT:    vshufps {{.*}} # ymm1 = ymm2[0,0],ymm1[2,2],ymm2[4,4],ymm1[6,6]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpermilps {{.*}} # xmm2 = xmm0[2,1,2,3]
+; AVX1-NEXT:    vpermilps {{.*}} # xmm0 = xmm0[0,3,2,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: @shuffle_v8i32_6caa87e5
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovdqa {{.*}} # ymm2 = <u,4,2,2,0,u,6,u>
+; AVX2-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpermq {{.*}} # ymm0 = ymm0[3,1,3,2]
+; AVX2-NEXT:    vpblendd {{.*}} # ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6],ymm0[7]
+; AVX2-NEXT:    retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 6, i32 12, i32 10, i32 10, i32 8, i32 7, i32 14, i32 5>
+  ret <8 x i32> %shuffle
+}
