[llvm] 89d9ff8 - [X86][SSE] foldShuffleOfHorizOp - add SHUFPS v4f32 handling

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 9 06:18:59 PST 2021


Author: Simon Pilgrim
Date: 2021-02-09T14:18:45Z
New Revision: 89d9ff82293f13d0dcb0dd173785196e42a3d8a9

URL: https://github.com/llvm/llvm-project/commit/89d9ff82293f13d0dcb0dd173785196e42a3d8a9
DIFF: https://github.com/llvm/llvm-project/commit/89d9ff82293f13d0dcb0dd173785196e42a3d8a9.diff

LOG: [X86][SSE] foldShuffleOfHorizOp - add SHUFPS v4f32 handling

Fold shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)); this is very similar to the equivalent unpack fold.

I did start trying to convert foldShuffleOfHorizOp to handle generic shuffle masks, but we're still relying on a lot of special cases at the moment.
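
A quick scalar model of why the mask arithmetic works (hadd4, shufps4 and
permute4 are hypothetical helpers modelling the HADDPS/SHUFPS lane semantics,
not LLVM APIs; the concrete mask is just one example that satisfies the
Op0 == Op1 && Op2 == Op3 condition in the patch below):

  #include <array>
  #include <cassert>

  using V4 = std::array<float, 4>;

  // HADDPS semantics: {a0+a1, a2+a3, b0+b1, b2+b3}.
  static V4 hadd4(V4 a, V4 b) {
    return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
  }

  // SHUFPS with a decoded mask: elements 0-1 select from a (indices 0-3),
  // elements 2-3 select from b (indices 4-7).
  static V4 shufps4(V4 a, V4 b, std::array<int, 4> m) {
    return {a[m[0]], a[m[1]], b[m[2] - 4], b[m[3] - 4]};
  }

  // Single-source permute; the patch emits this as SHUFPS(Res, Res, imm).
  static V4 permute4(V4 a, std::array<int, 4> m) {
    return {a[m[0]], a[m[1]], a[m[2]], a[m[3]]};
  }

  int main() {
    V4 x{1, 2, 3, 4}, y{5, 6, 7, 8}, z{9, 10, 11, 12}, w{13, 14, 15, 16};

    // Mask elements 0-1 only read the x half of hadd4(x, y) (indices < 2),
    // and elements 2-3 only read the z half of hadd4(z, w) (indices < 6).
    std::array<int, 4> m{0, 1, 5, 4};
    V4 lhs = shufps4(hadd4(x, y), hadd4(z, w), m);

    // The fold: one hadd of the selected operands plus a permute, with
    // NewMask = {m0 % 2, m1 % 2, (m2 - 4) % 2 + 2, (m3 - 4) % 2 + 2}.
    V4 h = hadd4(x, z);
    V4 rhs = permute4(h, {m[0] % 2, m[1] % 2,
                          (m[2] - 4) % 2 + 2, (m[3] - 4) % 2 + 2});

    assert(lhs == rhs); // both are {3, 7, 23, 19}
    return 0;
  }

The >= 2 and >= 6 tests in the patch are exactly these "which source did each
lane come from" checks, and the folded form needs one hadd instead of two.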

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/horizontal-sum.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bec7437a872d..a126a69b1200 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37861,8 +37861,9 @@ static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
   unsigned Opcode = N->getOpcode();
   if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
     if (Opcode != X86ISD::UNPCKL && Opcode != X86ISD::UNPCKH)
-      if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
-        return SDValue();
+      if (Opcode != X86ISD::SHUFP)
+        if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
+          return SDValue();
 
   // For a broadcast, peek through an extract element of index 0 to find the
   // horizontal op: broadcast (ext_vec_elt HOp, 0)
@@ -37903,6 +37904,32 @@ static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
     return DAG.getBitcast(VT, Res);
   }
 
+  // shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
+  // Don't fold if hop(x,y) == hop(z,w).
+  if (Opcode == X86ISD::SHUFP) {
+    SDValue HOp2 = N->getOperand(1);
+    if (HOp.getOpcode() != HOp2.getOpcode() || VT != MVT::v4f32 || HOp == HOp2)
+      return SDValue();
+    SmallVector<int> RepeatedMask;
+    DecodeSHUFPMask(4, 32, N->getConstantOperandVal(2), RepeatedMask);
+    SDValue Op0 = HOp.getOperand(RepeatedMask[0] >= 2 ? 1 : 0);
+    SDValue Op1 = HOp.getOperand(RepeatedMask[1] >= 2 ? 1 : 0);
+    SDValue Op2 = HOp2.getOperand(RepeatedMask[2] >= 6 ? 1 : 0);
+    SDValue Op3 = HOp2.getOperand(RepeatedMask[3] >= 6 ? 1 : 0);
+    if ((Op0 == Op1) && (Op2 == Op3)) {
+      int NewMask[4] = {RepeatedMask[0] % 2, RepeatedMask[1] % 2,
+                        ((RepeatedMask[2] - 4) % 2) + 2,
+                        ((RepeatedMask[3] - 4) % 2) + 2};
+      SDLoc DL(HOp);
+      SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, Op0, Op2);
+      // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
+      // combining and domain handling will simplify this later on.
+      return DAG.getNode(X86ISD::SHUFP, DL, VT, Res, Res,
+                         getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
+    }
+    return SDValue();
+  }
+
   // 128-bit horizontal math instructions are defined to operate on adjacent
   // lanes of each operand as:
   // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
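
For reference, the immediate built by getV4X86ShuffleImm8ForMask above packs
the four 2-bit lane selectors into the standard SHUFPS imm8; a sketch of that
encoding (shufpsImm is a hypothetical name, not the LLVM helper itself):

  #include <array>

  // Element i's selector occupies bits [2*i+1 : 2*i], so the NewMask
  // {0, 1, 3, 2} from the example above encodes as 0b10110100 == 0xB4.
  static unsigned shufpsImm(const std::array<int, 4> &m) {
    return (m[0] & 3) | ((m[1] & 3) << 2) | ((m[2] & 3) << 4) |
           ((m[3] & 3) << 6);
  }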

diff --git a/llvm/test/CodeGen/X86/horizontal-sum.ll b/llvm/test/CodeGen/X86/horizontal-sum.ll
index a5b34c482474..dbccdbd844d2 100644
--- a/llvm/test/CodeGen/X86/horizontal-sum.ll
+++ b/llvm/test/CodeGen/X86/horizontal-sum.ll
@@ -218,31 +218,27 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, <4 x float> %5, <4 x float> %6, <4 x float> %7) {
 ; SSSE3-SLOW-LABEL: pair_sum_v8f32_v4f32:
 ; SSSE3-SLOW:       # %bb.0:
+; SSSE3-SLOW-NEXT:    movaps %xmm3, %xmm8
 ; SSSE3-SLOW-NEXT:    haddps %xmm0, %xmm0
-; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm8 = xmm0[1,1,3,3]
-; SSSE3-SLOW-NEXT:    addps %xmm8, %xmm0
+; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSSE3-SLOW-NEXT:    addps %xmm3, %xmm0
 ; SSSE3-SLOW-NEXT:    haddps %xmm1, %xmm1
-; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm8 = xmm1[1,1,3,3]
-; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm8
-; SSSE3-SLOW-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
+; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm3
+; SSSE3-SLOW-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; SSSE3-SLOW-NEXT:    movaps %xmm2, %xmm1
-; SSSE3-SLOW-NEXT:    haddps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT:    haddps %xmm2, %xmm2
-; SSSE3-SLOW-NEXT:    haddps %xmm3, %xmm3
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[1,0]
-; SSSE3-SLOW-NEXT:    movaps %xmm5, %xmm2
+; SSSE3-SLOW-NEXT:    haddps %xmm8, %xmm1
+; SSSE3-SLOW-NEXT:    haddps %xmm2, %xmm8
 ; SSSE3-SLOW-NEXT:    haddps %xmm4, %xmm5
-; SSSE3-SLOW-NEXT:    haddps %xmm4, %xmm4
-; SSSE3-SLOW-NEXT:    haddps %xmm2, %xmm2
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[1,1]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,0]
 ; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT:    addps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm8 = xmm8[3,1],xmm5[3,1]
+; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm8
+; SSSE3-SLOW-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm8[0]
 ; SSSE3-SLOW-NEXT:    haddps %xmm6, %xmm6
 ; SSSE3-SLOW-NEXT:    haddps %xmm7, %xmm7
 ; SSSE3-SLOW-NEXT:    haddps %xmm7, %xmm6
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm8 = xmm8[2,3],xmm6[0,2]
+; SSSE3-SLOW-NEXT:    movaps %xmm8, %xmm1
 ; SSSE3-SLOW-NEXT:    retq
 ;
 ; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
@@ -251,22 +247,17 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
 ; SSSE3-FAST-NEXT:    movaps %xmm2, %xmm1
 ; SSSE3-FAST-NEXT:    haddps %xmm3, %xmm1
-; SSSE3-FAST-NEXT:    haddps %xmm2, %xmm2
-; SSSE3-FAST-NEXT:    haddps %xmm3, %xmm3
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,0],xmm2[1,0]
-; SSSE3-FAST-NEXT:    movaps %xmm5, %xmm2
+; SSSE3-FAST-NEXT:    haddps %xmm2, %xmm3
 ; SSSE3-FAST-NEXT:    haddps %xmm4, %xmm5
-; SSSE3-FAST-NEXT:    haddps %xmm4, %xmm4
-; SSSE3-FAST-NEXT:    haddps %xmm2, %xmm2
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[1,1]
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,0]
 ; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-FAST-NEXT:    addps %xmm3, %xmm1
-; SSSE3-FAST-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
+; SSSE3-FAST-NEXT:    addps %xmm1, %xmm3
+; SSSE3-FAST-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
 ; SSSE3-FAST-NEXT:    haddps %xmm6, %xmm6
 ; SSSE3-FAST-NEXT:    haddps %xmm7, %xmm7
 ; SSSE3-FAST-NEXT:    haddps %xmm7, %xmm6
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,2]
+; SSSE3-FAST-NEXT:    movaps %xmm3, %xmm1
 ; SSSE3-FAST-NEXT:    retq
 ;
 ; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:

