[llvm] 85460a2 - [X86][SSE] Move unpack(hop,hop) fold from foldShuffleOfHorizOp to combineTargetShuffle

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed May 5 05:47:04 PDT 2021


Author: Simon Pilgrim
Date: 2021-05-05T13:36:09+01:00
New Revision: 85460a2f5b6309450b341d19d800a7d90786b941

URL: https://github.com/llvm/llvm-project/commit/85460a2f5b6309450b341d19d800a7d90786b941
DIFF: https://github.com/llvm/llvm-project/commit/85460a2f5b6309450b341d19d800a7d90786b941.diff

LOG: [X86][SSE] Move unpack(hop,hop) fold from foldShuffleOfHorizOp to combineTargetShuffle

By moving this fold until after more of the shuffle canonicalization has been performed, we reduce the demanded vector elements, avoiding a few unnecessary copies and moves.
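For reference, a minimal scalar model of the transform (illustrative C++ only, assuming PHADDD-style pairwise adds and 32-bit unpack semantics; hadd/unpcklo/unpckhi/permute are hypothetical helpers, not LLVM APIs):

#include <array>
#include <cassert>

using V4 = std::array<int, 4>;

// phaddd-style horizontal add: pairwise sums of A, then pairwise sums of B.
static V4 hadd(const V4 &A, const V4 &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}

// punpckldq-style: interleave the low halves of A and B.
static V4 unpcklo(const V4 &A, const V4 &B) {
  return {A[0], B[0], A[1], B[1]};
}

// punpckhdq-style: interleave the high halves of A and B.
static V4 unpckhi(const V4 &A, const V4 &B) {
  return {A[2], B[2], A[3], B[3]};
}

// pshufd-style permute with an explicit 4-element mask.
static V4 permute(const V4 &A, const std::array<int, 4> &M) {
  return {A[M[0]], A[M[1]], A[M[2]], A[M[3]]};
}

int main() {
  V4 X{1, 2, 3, 4}, Y{5, 6, 7, 8}, Z{9, 10, 11, 12}, W{13, 14, 15, 16};
  // unpcklo(hop(x,y),hop(z,w)) == permute(hop(x,z)) with mask {0,2,1,3}.
  assert(unpcklo(hadd(X, Y), hadd(Z, W)) == permute(hadd(X, Z), {0, 2, 1, 3}));
  // unpckhi(hop(x,y),hop(z,w)) == permute(hop(y,w)) with the same mask.
  assert(unpckhi(hadd(X, Y), hadd(Z, W)) == permute(hadd(Y, W), {0, 2, 1, 3}));
  return 0;
}

The {0,2,1,3} mask here is the same immediate that the new combineTargetShuffle case builds via getV4X86ShuffleImm8ForMask in the diff below.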

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/horizontal-sum.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d54e8672602a..3b89f383416c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37566,6 +37566,29 @@ static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
     }
     return SDValue();
   }
+  case X86ISD::UNPCKL:
+  case X86ISD::UNPCKH: {
+    // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
+    // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
+    // Don't fold if hop(x,y) == hop(z,w).
+    // TODO: Merge this into canonicalizeShuffleMaskWithHorizOp?
+    SDValue N0 = N.getOperand(0);
+    SDValue N1 = N.getOperand(1);
+    if (VT.getScalarSizeInBits() == 32 && N0 != N1 &&
+        N0.getOpcode() == N1.getOpcode() && isHorizOp(N0.getOpcode())) {
+      unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
+      SDValue Res = DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(LoHi),
+                                N1.getOperand(LoHi));
+      // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
+      // combining and domain handling will simplify this later on.
+      EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
+      Res = DAG.getBitcast(ShuffleVT, Res);
+      Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
+                        getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
+      return DAG.getBitcast(VT, Res);
+    }
+    return SDValue();
+  }
   case X86ISD::VPERMI: {
     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
@@ -38071,9 +38094,8 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
 // TODO: Merge this into canonicalizeShuffleMaskWithHorizOp.
 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
   unsigned Opcode = N->getOpcode();
-  if (Opcode != X86ISD::UNPCKL && Opcode != X86ISD::UNPCKH)
-    if (Opcode != X86ISD::SHUFP)
-      return SDValue();
+  if (Opcode != X86ISD::SHUFP)
+    return SDValue();
 
   EVT VT = N->getValueType(0);
   SDValue HOp = N->getOperand(0);
@@ -38081,28 +38103,6 @@ static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
       HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
     return SDValue();
 
-  // unpcklo(hop(x,y),hop(z,w)) -> permute(hop(x,z)).
-  // unpckhi(hop(x,y),hop(z,w)) -> permute(hop(y,w)).
-  // Don't fold if hop(x,y) == hop(z,w).
-  if (Opcode == X86ISD::UNPCKL || Opcode == X86ISD::UNPCKH) {
-    SDValue HOp2 = N->getOperand(1);
-    if (HOp.getOpcode() != HOp2.getOpcode() || VT.getScalarSizeInBits() != 32)
-      return SDValue();
-    if (HOp == HOp2)
-      return SDValue();
-    SDLoc DL(HOp);
-    unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
-    SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, HOp.getOperand(LoHi),
-                              HOp2.getOperand(LoHi));
-    // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
-    // combining and domain handling will simplify this later on.
-    EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
-    Res = DAG.getBitcast(ShuffleVT, Res);
-    Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
-                      getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
-    return DAG.getBitcast(VT, Res);
-  }
-
   // shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
   // Don't fold if hop(x,y) == hop(z,w).
   if (Opcode == X86ISD::SHUFP) {

diff --git a/llvm/test/CodeGen/X86/horizontal-sum.ll b/llvm/test/CodeGen/X86/horizontal-sum.ll
index 8c50dd2e108f..0ddc333d3d4e 100644
--- a/llvm/test/CodeGen/X86/horizontal-sum.ll
+++ b/llvm/test/CodeGen/X86/horizontal-sum.ll
@@ -94,8 +94,8 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; SSSE3-SLOW:       # %bb.0:
 ; SSSE3-SLOW-NEXT:    phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT:    paddd %xmm1, %xmm0
 ; SSSE3-SLOW-NEXT:    phaddd %xmm2, %xmm3
 ; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
@@ -115,9 +115,9 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; AVX1-SLOW:       # %bb.0:
 ; AVX1-SLOW-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX1-SLOW-NEXT:    vphaddd %xmm2, %xmm2, %xmm1
 ; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
 ; AVX1-SLOW-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
@@ -184,57 +184,55 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; SSSE3-SLOW:       # %bb.0:
 ; SSSE3-SLOW-NEXT:    haddps %xmm1, %xmm0
 ; SSSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm0
-; SSSE3-SLOW-NEXT:    movaps %xmm2, %xmm1
-; SSSE3-SLOW-NEXT:    haddps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT:    haddps %xmm2, %xmm3
+; SSSE3-SLOW-NEXT:    movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT:    haddps %xmm2, %xmm1
 ; SSSE3-SLOW-NEXT:    haddps %xmm4, %xmm5
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
-; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm3
-; SSSE3-SLOW-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-SLOW-NEXT:    haddps %xmm3, %xmm2
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
+; SSSE3-SLOW-NEXT:    addps %xmm2, %xmm1
+; SSSE3-SLOW-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-SLOW-NEXT:    haddps %xmm7, %xmm6
 ; SSSE3-SLOW-NEXT:    haddps %xmm6, %xmm6
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,3]
-; SSSE3-SLOW-NEXT:    movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,3]
 ; SSSE3-SLOW-NEXT:    retq
 ;
 ; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
 ; SSSE3-FAST:       # %bb.0:
 ; SSSE3-FAST-NEXT:    haddps %xmm1, %xmm0
 ; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
-; SSSE3-FAST-NEXT:    movaps %xmm2, %xmm1
-; SSSE3-FAST-NEXT:    haddps %xmm3, %xmm1
-; SSSE3-FAST-NEXT:    haddps %xmm2, %xmm3
+; SSSE3-FAST-NEXT:    movaps %xmm3, %xmm1
+; SSSE3-FAST-NEXT:    haddps %xmm2, %xmm1
 ; SSSE3-FAST-NEXT:    haddps %xmm4, %xmm5
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
-; SSSE3-FAST-NEXT:    addps %xmm1, %xmm3
-; SSSE3-FAST-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-FAST-NEXT:    haddps %xmm3, %xmm2
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
+; SSSE3-FAST-NEXT:    addps %xmm2, %xmm1
+; SSSE3-FAST-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-FAST-NEXT:    haddps %xmm6, %xmm6
 ; SSSE3-FAST-NEXT:    haddps %xmm7, %xmm7
 ; SSSE3-FAST-NEXT:    haddps %xmm7, %xmm6
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,2]
-; SSSE3-FAST-NEXT:    movaps %xmm3, %xmm1
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
 ; SSSE3-FAST-NEXT:    retq
 ;
 ; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
 ; AVX1-SLOW:       # %bb.0:
 ; AVX1-SLOW-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
 ; AVX1-SLOW-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm8, %xmm0
-; AVX1-SLOW-NEXT:    vhaddps %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm2
-; AVX1-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm3
-; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[0,2],xmm2[0,1]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[0]
-; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,1]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[1]
-; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm4, %xmm1
+; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
+; AVX1-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm1
+; AVX1-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
+; AVX1-SLOW-NEXT:    vhaddps %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
+; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm3, %xmm1
 ; AVX1-SLOW-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -247,34 +245,34 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX1-FAST-LABEL: pair_sum_v8f32_v4f32:
 ; AVX1-FAST:       # %bb.0:
 ; AVX1-FAST-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vhaddps %xmm3, %xmm2, %xmm1
-; AVX1-FAST-NEXT:    vhaddps %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT:    vhaddps %xmm3, %xmm3, %xmm3
-; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[1],xmm3[1],zero,zero
-; AVX1-FAST-NEXT:    vhaddps %xmm4, %xmm4, %xmm3
+; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm8
+; AVX1-FAST-NEXT:    vhaddps %xmm2, %xmm2, %xmm1
+; AVX1-FAST-NEXT:    vhaddps %xmm3, %xmm3, %xmm0
+; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[1],xmm0[1],zero,zero
+; AVX1-FAST-NEXT:    vhaddps %xmm4, %xmm4, %xmm1
 ; AVX1-FAST-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
-; AVX1-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,1]
-; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; AVX1-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
-; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[1]
-; AVX1-FAST-NEXT:    vaddps %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-FAST-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-FAST-NEXT:    vhaddps %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
+; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
+; AVX1-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
+; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[1]
+; AVX1-FAST-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; AVX1-FAST-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm8[0],xmm0[0]
+; AVX1-FAST-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-FAST-NEXT:    vhaddps %xmm7, %xmm6, %xmm2
 ; AVX1-FAST-NEXT:    vhaddps %xmm0, %xmm2, %xmm2
-; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FAST-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
+; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-FAST-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
 ; AVX1-FAST-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
 ; AVX2-SLOW-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX2-SLOW-NEXT:    vaddps %xmm0, %xmm8, %xmm0
+; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT:    vhaddps %xmm3, %xmm2, %xmm2
@@ -364,17 +362,17 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; SSSE3-SLOW:       # %bb.0:
 ; SSSE3-SLOW-NEXT:    phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; SSSE3-SLOW-NEXT:    paddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-SLOW-NEXT:    phaddd %xmm4, %xmm5
 ; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,1,0,1]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-SLOW-NEXT:    movdqa %xmm2, %xmm1
 ; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[2,0]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[2,0]
 ; SSSE3-SLOW-NEXT:    paddd %xmm1, %xmm2
 ; SSSE3-SLOW-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSSE3-SLOW-NEXT:    phaddd %xmm7, %xmm6
@@ -388,12 +386,12 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-FAST:       # %bb.0:
 ; SSSE3-FAST-NEXT:    phaddd %xmm1, %xmm0
 ; SSSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
-; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    movdqa %xmm5, %xmm1
 ; SSSE3-FAST-NEXT:    phaddd %xmm4, %xmm5
 ; SSSE3-FAST-NEXT:    phaddd %xmm4, %xmm4
 ; SSSE3-FAST-NEXT:    phaddd %xmm1, %xmm1
 ; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
+; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    movdqa %xmm2, %xmm3
 ; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[2,0]
 ; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[2,0]
@@ -409,20 +407,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; AVX1-SLOW:       # %bb.0:
 ; AVX1-SLOW-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
-; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    vphaddd %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
-; AVX1-SLOW-NEXT:    vphaddd %xmm4, %xmm4, %xmm3
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT:    vphaddd %xmm4, %xmm4, %xmm1
 ; AVX1-SLOW-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
-; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX1-SLOW-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
+; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
 ; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
-; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm5[6,7]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,1]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1],zero
+; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,3,1,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[1],zero
 ; AVX1-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
-; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
 ; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
 ; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -436,20 +434,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-FAST:       # %bb.0:
 ; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
 ; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm8
-; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm1
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; AVX1-FAST-NEXT:    vphaddd %xmm2, %xmm2, %xmm2
-; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm3, %xmm3
+; AVX1-FAST-NEXT:    vphaddd %xmm2, %xmm2, %xmm1
+; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm3, %xmm0
 ; AVX1-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm4
 ; AVX1-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm5
-; AVX1-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[0,0,0,0]
-; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
-; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
-; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[1],zero
-; AVX1-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[3]
-; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
+; AVX1-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
+; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
+; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; AVX1-FAST-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[1],zero
+; AVX1-FAST-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
+; AVX1-FAST-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
 ; AVX1-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
 ; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX1-FAST-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -462,9 +460,9 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
-; AVX2-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vphaddd %xmm4, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
@@ -1147,13 +1145,13 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
 ; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
 ; AVX-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
 ; AVX-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
 ; AVX-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
 ; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
 ; AVX-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
 ; AVX-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
 ; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-FAST-NEXT:    retq
   %5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)

