[llvm-branch-commits] [llvm] 80dee79 - [X86][SSE] Fold unpack(hop(), hop()) -> permute(hop())

Simon Pilgrim via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jan 8 07:30:37 PST 2021


Author: Simon Pilgrim
Date: 2021-01-08T15:22:17Z
New Revision: 80dee7965dffdfb866afa9d74f3a4a97453708b2

URL: https://github.com/llvm/llvm-project/commit/80dee7965dffdfb866afa9d74f3a4a97453708b2
DIFF: https://github.com/llvm/llvm-project/commit/80dee7965dffdfb866afa9d74f3a4a97453708b2.diff

LOG: [X86][SSE] Fold unpack(hop(),hop()) -> permute(hop())

UNPCKL/UNPCKH only use one source operand from each hop, so we can merge the two hops and then permute the result.
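
The element-level identity behind the fold can be checked with a small scalar model. The sketch below is illustrative only and not part of the commit; the helper names (hadd4, unpacklo4, permute_0213) are hypothetical stand-ins for the HADDPS/UNPCKLPS/SHUFPS semantics on v4f32.

    // Minimal C++ sketch of the v4f32 case: unpacklo(hadd(a,b), hadd(c,d))
    // reads only the low half of each hadd, so it equals a single merged
    // hadd(a,c) followed by a {0,2,1,3} lane permute.
    #include <array>
    #include <cassert>

    using V4 = std::array<float, 4>;

    // HADDPS semantics: [a0+a1, a2+a3, b0+b1, b2+b3].
    static V4 hadd4(V4 a, V4 b) {
      return {a[0] + a[1], a[2] + a[3], b[0] + b[1], b[2] + b[3]};
    }

    // UNPCKLPS semantics: interleave the low halves of a and b.
    static V4 unpacklo4(V4 a, V4 b) { return {a[0], b[0], a[1], b[1]}; }

    // SHUFPS/VPERMILPS with immediate mask {0,2,1,3} on a single source.
    static V4 permute_0213(V4 a) { return {a[0], a[2], a[1], a[3]}; }

    int main() {
      V4 a{1, 2, 3, 4}, b{5, 6, 7, 8}, c{9, 10, 11, 12}, d{13, 14, 15, 16};
      V4 lhs = unpacklo4(hadd4(a, b), hadd4(c, d)); // original pattern
      V4 rhs = permute_0213(hadd4(a, c));           // folded pattern
      assert(lhs == rhs);
      return 0;
    }

The same reasoning applies to UNPCKH, which reads elements 2 and 3 of each input, i.e. the second source operand of each hop; that is why the fold below handles both opcodes with a single LoHi operand selector.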

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/horizontal-shuffle-2.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 16f1023ed5f8..7b0e927a33d2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37513,10 +37513,12 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
 
 /// Eliminate a redundant shuffle of a horizontal math op.
 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
+  // TODO: Can we use getTargetShuffleInputs instead?
   unsigned Opcode = N->getOpcode();
   if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
-    if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
-      return SDValue();
+    if (Opcode != X86ISD::UNPCKL && Opcode != X86ISD::UNPCKH)
+      if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
+        return SDValue();
 
   // For a broadcast, peek through an extract element of index 0 to find the
   // horizontal op: broadcast (ext_vec_elt HOp, 0)
@@ -37535,6 +37537,24 @@ static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
       HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
     return SDValue();
 
+  // unpck(hop,hop) -> permute(hop,hop).
+  if (Opcode == X86ISD::UNPCKL || Opcode == X86ISD::UNPCKH) {
+    SDValue HOp2 = N->getOperand(1);
+    if (HOp.getOpcode() != HOp2.getOpcode() || VT.getScalarSizeInBits() != 32)
+      return SDValue();
+    SDLoc DL(HOp);
+    unsigned LoHi = Opcode == X86ISD::UNPCKL ? 0 : 1;
+    SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, HOp.getOperand(LoHi),
+                              HOp2.getOperand(LoHi));
+    // Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
+    // combining and domain handling will simplify this later on.
+    EVT ShuffleVT = VT.changeVectorElementType(MVT::f32);
+    Res = DAG.getBitcast(ShuffleVT, Res);
+    Res = DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
+                      getV4X86ShuffleImm8ForMask({0, 2, 1, 3}, DL, DAG));
+    return DAG.getBitcast(VT, Res);
+  }
+
   // 128-bit horizontal math instructions are defined to operate on adjacent
   // lanes of each operand as:
   // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]

diff --git a/llvm/test/CodeGen/X86/horizontal-shuffle-2.ll b/llvm/test/CodeGen/X86/horizontal-shuffle-2.ll
index c012c88c6ed2..6b4b8047d0f0 100644
--- a/llvm/test/CodeGen/X86/horizontal-shuffle-2.ll
+++ b/llvm/test/CodeGen/X86/horizontal-shuffle-2.ll
@@ -9,9 +9,8 @@
 define <4 x float> @test_unpacklo_hadd_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3) {
 ; CHECK-LABEL: test_unpacklo_hadd_v4f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vhaddps %xmm0, %xmm2, %xmm1
-; CHECK-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    vhaddps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %0, <4 x float> %1) #4
   %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %3) #4
@@ -22,9 +21,8 @@ define <4 x float> @test_unpacklo_hadd_v4f32(<4 x float> %0, <4 x float> %1, <4
 define <4 x float> @test_unpackhi_hadd_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3) {
 ; CHECK-LABEL: test_unpackhi_hadd_v4f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vhaddps %xmm3, %xmm0, %xmm1
-; CHECK-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT:    vhaddps %xmm3, %xmm1, %xmm0
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %0, <4 x float> %1) #4
   %6 = tail call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %2, <4 x float> %3) #4
@@ -35,9 +33,8 @@ define <4 x float> @test_unpackhi_hadd_v4f32(<4 x float> %0, <4 x float> %1, <4
 define <4 x float> @test_unpacklo_hsub_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3) {
 ; CHECK-LABEL: test_unpacklo_hsub_v4f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vhsubps %xmm0, %xmm2, %xmm1
-; CHECK-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    vhsubps %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %0, <4 x float> %1) #4
   %6 = tail call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %2, <4 x float> %3) #4
@@ -48,9 +45,8 @@ define <4 x float> @test_unpacklo_hsub_v4f32(<4 x float> %0, <4 x float> %1, <4
 define <4 x float> @test_unpackhi_hsub_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3) {
 ; CHECK-LABEL: test_unpackhi_hsub_v4f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vhsubps %xmm3, %xmm0, %xmm1
-; CHECK-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT:    vhsubps %xmm3, %xmm1, %xmm0
+; CHECK-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %0, <4 x float> %1) #4
   %6 = tail call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %2, <4 x float> %3) #4
@@ -61,9 +57,8 @@ define <4 x float> @test_unpackhi_hsub_v4f32(<4 x float> %0, <4 x float> %1, <4
 define <4 x i32> @test_unpacklo_hadd_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3) {
 ; CHECK-LABEL: test_unpacklo_hadd_v4i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vphaddd %xmm0, %xmm2, %xmm1
-; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %0, <4 x i32> %1) #5
   %6 = tail call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %2, <4 x i32> %3) #5
@@ -74,9 +69,8 @@ define <4 x i32> @test_unpacklo_hadd_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32>
 define <4 x i32> @test_unpackhi_hadd_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3) {
 ; CHECK-LABEL: test_unpackhi_hadd_v4i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vphaddd %xmm3, %xmm0, %xmm1
-; CHECK-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT:    vphaddd %xmm3, %xmm1, %xmm0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %0, <4 x i32> %1) #5
   %6 = tail call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %2, <4 x i32> %3) #5
@@ -87,9 +81,8 @@ define <4 x i32> @test_unpackhi_hadd_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32>
 define <4 x i32> @test_unpacklo_hsub_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3) {
 ; CHECK-LABEL: test_unpacklo_hsub_v4i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vphsubd %xmm0, %xmm2, %xmm1
-; CHECK-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT:    vphsubd %xmm2, %xmm0, %xmm0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %0, <4 x i32> %1) #5
   %6 = tail call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %2, <4 x i32> %3) #5
@@ -100,9 +93,8 @@ define <4 x i32> @test_unpacklo_hsub_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32>
 define <4 x i32> @test_unpackhi_hsub_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3) {
 ; CHECK-LABEL: test_unpackhi_hsub_v4i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vphsubd %xmm3, %xmm0, %xmm1
-; CHECK-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; CHECK-NEXT:    vphsubd %xmm3, %xmm1, %xmm0
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %0, <4 x i32> %1) #5
   %6 = tail call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %2, <4 x i32> %3) #5
@@ -117,9 +109,8 @@ define <4 x i32> @test_unpackhi_hsub_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32>
 define <8 x float> @test_unpacklo_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
 ; CHECK-LABEL: test_unpacklo_hadd_v8f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vhaddps %ymm0, %ymm2, %ymm1
-; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %0, <8 x float> %1) #4
   %6 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %2, <8 x float> %3) #4
@@ -130,9 +121,8 @@ define <8 x float> @test_unpacklo_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8
 define <8 x float> @test_unpackhi_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
 ; CHECK-LABEL: test_unpackhi_hadd_v8f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vhaddps %ymm3, %ymm0, %ymm1
-; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    vhaddps %ymm3, %ymm1, %ymm0
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %0, <8 x float> %1) #4
   %6 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %2, <8 x float> %3) #4
@@ -143,9 +133,8 @@ define <8 x float> @test_unpackhi_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8
 define <8 x float> @test_unpacklo_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
 ; CHECK-LABEL: test_unpacklo_hsub_v8f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vhsubps %ymm0, %ymm2, %ymm1
-; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    vhsubps %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %0, <8 x float> %1) #4
   %6 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %2, <8 x float> %3) #4
@@ -156,9 +145,8 @@ define <8 x float> @test_unpacklo_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8
 define <8 x float> @test_unpackhi_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
 ; CHECK-LABEL: test_unpackhi_hsub_v8f32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vhsubps %ymm3, %ymm0, %ymm1
-; CHECK-NEXT:    vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    vhsubps %ymm3, %ymm1, %ymm0
+; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %0, <8 x float> %1) #4
   %6 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %2, <8 x float> %3) #4
@@ -169,9 +157,8 @@ define <8 x float> @test_unpackhi_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8
 define <8 x i32> @test_unpacklo_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
 ; CHECK-LABEL: test_unpacklo_hadd_v8i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vphaddd %ymm0, %ymm2, %ymm1
-; CHECK-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    vphaddd %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %0, <8 x i32> %1) #5
   %6 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %2, <8 x i32> %3) #5
@@ -182,9 +169,8 @@ define <8 x i32> @test_unpacklo_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32>
 define <8 x i32> @test_unpackhi_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
 ; CHECK-LABEL: test_unpackhi_hadd_v8i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphaddd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vphaddd %ymm3, %ymm0, %ymm1
-; CHECK-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    vphaddd %ymm3, %ymm1, %ymm0
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %0, <8 x i32> %1) #5
   %6 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %2, <8 x i32> %3) #5
@@ -195,9 +181,8 @@ define <8 x i32> @test_unpackhi_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32>
 define <8 x i32> @test_unpacklo_hsub_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
 ; CHECK-LABEL: test_unpacklo_hsub_v8i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubd %ymm0, %ymm0, %ymm0
-; CHECK-NEXT:    vphsubd %ymm0, %ymm2, %ymm1
-; CHECK-NEXT:    vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; CHECK-NEXT:    vphsubd %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %0, <8 x i32> %1) #5
   %6 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %2, <8 x i32> %3) #5
@@ -208,9 +193,8 @@ define <8 x i32> @test_unpacklo_hsub_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32>
 define <8 x i32> @test_unpackhi_hsub_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
 ; CHECK-LABEL: test_unpackhi_hsub_v8i32:
 ; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vphsubd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vphsubd %ymm3, %ymm0, %ymm1
-; CHECK-NEXT:    vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; CHECK-NEXT:    vphsubd %ymm3, %ymm1, %ymm0
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %5 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %0, <8 x i32> %1) #5
   %6 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %2, <8 x i32> %3) #5


        

