[llvm] r359707 - [X86][SSE] Fold scalar horizontal add/sub for non-0/1 element extractions

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed May 1 10:13:35 PDT 2019


Author: rksimon
Date: Wed May  1 10:13:35 2019
New Revision: 359707

URL: http://llvm.org/viewvc/llvm-project?rev=359707&view=rev
Log:
[X86][SSE] Fold scalar horizontal add/sub for non-0/1 element extractions

We already perform horizontal add/sub if we extract from elements 0 and 1; this patch extends it to non-0/1 element extraction indices (as long as they come from the lowest 128-bit subvector).

Differential Revision: https://reviews.llvm.org/D61263
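
As a rough illustration of the rule this adds (a minimal standalone sketch, not
the actual LLVM helper or its API, and ignoring the commuted-operand swap that
the real code performs for hadd): the extracted pair must be adjacent, start at
an even element, and lie in the lowest 128-bit lane, and the scalar result is
then read from element LExtIndex / 2 of a 128-bit horizontal op:

  #include <optional>

  // Hypothetical helper modelling the index checks introduced by this patch.
  // Returns the element of hadd/hsub(X, X) that holds X[L] op X[L+1], or
  // nullopt if the generic lowering should be kept. Assumes a legal
  // 128/256/512-bit vector type, as asserted in the real code.
  std::optional<unsigned> hoppableExtractIndex(unsigned LExtIndex,
                                               unsigned RExtIndex,
                                               unsigned VecBitWidth,
                                               unsigned NumElts) {
    unsigned NumLanes = VecBitWidth / 128;
    unsigned NumEltsPerLane = NumElts / NumLanes;
    // The pair must be (even, even + 1) ...
    if ((LExtIndex & 1) != 0 || RExtIndex != LExtIndex + 1)
      return std::nullopt;
    // ... and must come from the lowest 128-bit lane, the only lane that is
    // free to extract (ymm/zmm -> xmm).
    if (LExtIndex >= NumEltsPerLane)
      return std::nullopt;
    // hadd/hsub of X with itself puts X[2*i] op X[2*i+1] into element i.
    return LExtIndex / 2;
  }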

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/haddsub.ll
    llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=359707&r1=359706&r2=359707&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed May  1 10:13:35 2019
@@ -19020,33 +19020,38 @@ static SDValue lowerAddSubToHorizontalOp
   }
   unsigned LExtIndex = LHS.getConstantOperandVal(1);
   unsigned RExtIndex = RHS.getConstantOperandVal(1);
-  if (LExtIndex == 1 && RExtIndex == 0 &&
+  if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
       (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
     std::swap(LExtIndex, RExtIndex);
 
-  // TODO: This can be extended to handle other adjacent extract pairs.
-  if (LExtIndex != 0 || RExtIndex != 1)
+  if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
     return Op;
 
   SDValue X = LHS.getOperand(0);
   EVT VecVT = X.getValueType();
   unsigned BitWidth = VecVT.getSizeInBits();
+  unsigned NumLanes = BitWidth / 128;
+  unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
   assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
          "Not expecting illegal vector widths here");
 
   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
-  // equivalent, so extract the 256/512-bit source op to 128-bit.
-  // This is free: ymm/zmm -> xmm.
+  // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
+  // This is free if we're extracting from the bottom lane: ymm/zmm -> xmm.
+  if (NumEltsPerLane <= LExtIndex)
+    return Op;
+
   SDLoc DL(Op);
   if (BitWidth == 256 || BitWidth == 512)
     X = extract128BitVector(X, 0, DAG, DL);
 
   // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
   // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+  // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
   // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
   SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
-                     DAG.getIntPtrConstant(0, DL));
+                     DAG.getIntPtrConstant(LExtIndex / 2, DL));
 }
 
 /// Depending on uarch and/or optimizing for size, we might prefer to use a

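For example, with the hypothetical helper above, hoppableExtractIndex(2, 3, 128, 4)
and hoppableExtractIndex(2, 3, 256, 8) both return 1, matching the new
haddps/vhaddps plus element-1 extract sequences in the test updates below, while
an upper-lane pair such as elements 6 and 7 of a 256-bit vector fails the
NumEltsPerLane check, so this lowering leaves those ops unchanged.
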
Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=359707&r1=359706&r2=359707&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Wed May  1 10:13:35 2019
@@ -618,20 +618,32 @@ define float @extract_extract01_v4f32_fa
 }
 
 define float @extract_extract23_v4f32_fadd_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract23_v4f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4f32_fadd_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 2
   %x1 = extractelement <4 x float> %x, i32 3
   %x01 = fadd float %x0, %x1
@@ -667,20 +679,32 @@ define float @extract_extract01_v4f32_fa
 }
 
 define float @extract_extract23_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract23_v4f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4f32_fadd_f32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 2
   %x1 = extractelement <4 x float> %x, i32 3
   %x01 = fadd float %x1, %x0
@@ -776,21 +800,33 @@ define float @extract_extract01_v4f32_fs
 }
 
 define float @extract_extract23_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract23_v4f32_fsub_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    subss %xmm0, %xmm1
-; SSE3-NEXT:    movaps %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    subss %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4f32_fsub_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 2
   %x1 = extractelement <4 x float> %x, i32 3
   %x01 = fsub float %x0, %x1
@@ -919,21 +955,34 @@ define float @extract_extract01_v8f32_fa
 }
 
 define float @extract_extract23_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract23_v8f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8f32_fadd_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 2
   %x1 = extractelement <8 x float> %x, i32 3
   %x01 = fadd float %x0, %x1
@@ -941,13 +990,19 @@ define float @extract_extract23_v8f32_fa
 }
 
 define float @extract_extract67_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract67_v8f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm1, %xmm0
-; SSE3-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract67_v8f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract67_v8f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm1, %xmm1
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
 ;
 ; AVX-LABEL: extract_extract67_v8f32_fadd_f32:
 ; AVX:       # %bb.0:
@@ -994,21 +1049,34 @@ define float @extract_extract01_v8f32_fa
 }
 
 define float @extract_extract23_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract23_v8f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8f32_fadd_f32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 2
   %x1 = extractelement <8 x float> %x, i32 3
   %x01 = fadd float %x1, %x0
@@ -1016,13 +1084,19 @@ define float @extract_extract23_v8f32_fa
 }
 
 define float @extract_extract67_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract67_v8f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm1, %xmm0
-; SSE3-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract67_v8f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract67_v8f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm1, %xmm1
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
 ;
 ; AVX-LABEL: extract_extract67_v8f32_fadd_f32_commute:
 ; AVX:       # %bb.0:
@@ -1187,22 +1261,35 @@ define float @extract_extract01_v8f32_fs
 }
 
 define float @extract_extract23_v8f32_fsub_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract23_v8f32_fsub_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movaps %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    subss %xmm0, %xmm1
-; SSE3-NEXT:    movaps %xmm1, %xmm0
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    subss %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8f32_fsub_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 2
   %x1 = extractelement <8 x float> %x, i32 3
   %x01 = fsub float %x0, %x1

Modified: llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll?rev=359707&r1=359706&r2=359707&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll Wed May  1 10:13:35 2019
@@ -44,21 +44,34 @@ define i32 @extract_extract01_v4i32_add_
 }
 
 define i32 @extract_extract23_v4i32_add_i32(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v4i32_add_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4i32_add_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4i32_add_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x i32> %x, i32 2
   %x1 = extractelement <4 x i32> %x, i32 3
   %x01 = add i32 %x0, %x1
@@ -99,21 +112,34 @@ define i32 @extract_extract01_v4i32_add_
 }
 
 define i32 @extract_extract23_v4i32_add_i32_commute(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v4i32_add_i32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4i32_add_i32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4i32_add_i32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x i32> %x, i32 2
   %x1 = extractelement <4 x i32> %x, i32 3
   %x01 = add i32 %x1, %x0
@@ -157,21 +183,35 @@ define i16 @extract_extract01_v8i16_add_
 }
 
 define i16 @extract_extract45_v8i16_add_i16(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract45_v8i16_add_i16:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE3-NEXT:    pextrw $5, %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract45_v8i16_add_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $5, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract45_v8i16_add_i16:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract45_v8i16_add_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract45_v8i16_add_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract45_v8i16_add_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i16> %x, i32 4
   %x1 = extractelement <8 x i16> %x, i32 5
   %x01 = add i16 %x0, %x1
@@ -215,21 +255,35 @@ define i16 @extract_extract01_v8i16_add_
 }
 
 define i16 @extract_extract45_v8i16_add_i16_commute(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract45_v8i16_add_i16_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE3-NEXT:    pextrw $5, %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract45_v8i16_add_i16_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $5, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract45_v8i16_add_i16_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract45_v8i16_add_i16_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract45_v8i16_add_i16_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract45_v8i16_add_i16_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i16> %x, i32 4
   %x1 = extractelement <8 x i16> %x, i32 5
   %x01 = add i16 %x1, %x0
@@ -270,21 +324,34 @@ define i32 @extract_extract01_v4i32_sub_
 }
 
 define i32 @extract_extract23_v4i32_sub_i32(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v4i32_sub_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %eax
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    subl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v4i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v4i32_sub_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %eax
-; AVX-NEXT:    vextractps $3, %xmm0, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v4i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v4i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %eax
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v4i32_sub_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x i32> %x, i32 2
   %x1 = extractelement <4 x i32> %x, i32 3
   %x01 = sub i32 %x0, %x1
@@ -371,21 +438,35 @@ define i16 @extract_extract01_v8i16_sub_
 }
 
 define i16 @extract_extract23_v8i16_sub_i16(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract23_v8i16_sub_i16:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pextrw $2, %xmm0, %eax
-; SSE3-NEXT:    pextrw $3, %xmm0, %ecx
-; SSE3-NEXT:    subl %ecx, %eax
-; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8i16_sub_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-SLOW-NEXT:    pextrw $3, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8i16_sub_i16:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX-NEXT:    vpextrw $3, %xmm0, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8i16_sub_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8i16_sub_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-SLOW-NEXT:    vpextrw $3, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8i16_sub_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i16> %x, i32 2
   %x1 = extractelement <8 x i16> %x, i32 3
   %x01 = sub i16 %x0, %x1
@@ -474,22 +555,36 @@ define i32 @extract_extract01_v8i32_add_
 }
 
 define i32 @extract_extract23_v8i32_add_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v8i32_add_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8i32_add_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8i32_add_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 2
   %x1 = extractelement <8 x i32> %x, i32 3
   %x01 = add i32 %x0, %x1
@@ -497,23 +592,54 @@ define i32 @extract_extract23_v8i32_add_
 }
 
 define i32 @extract_extract67_v8i32_add_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract67_v8i32_add_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract67_v8i32_add_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract67_v8i32_add_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract67_v8i32_add_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm1, %xmm1
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract67_v8i32_add_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: extract_extract67_v8i32_add_i32:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-FAST-NEXT:    vzeroupper
+; AVX1-FAST-NEXT:    retq
+;
+; AVX2-FAST-LABEL: extract_extract67_v8i32_add_i32:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: extract_extract67_v8i32_add_i32:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX512-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-FAST-NEXT:    vzeroupper
+; AVX512-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 6
   %x1 = extractelement <8 x i32> %x, i32 7
   %x01 = add i32 %x0, %x1
@@ -556,22 +682,36 @@ define i32 @extract_extract01_v8i32_add_
 }
 
 define i32 @extract_extract23_v8i32_add_i32_commute(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v8i32_add_i32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8i32_add_i32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8i32_add_i32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 2
   %x1 = extractelement <8 x i32> %x, i32 3
   %x01 = add i32 %x1, %x0
@@ -579,23 +719,54 @@ define i32 @extract_extract23_v8i32_add_
 }
 
 define i32 @extract_extract67_v8i32_add_i32_commute(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract67_v8i32_add_i32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract67_v8i32_add_i32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract67_v8i32_add_i32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vextractps $2, %xmm0, %ecx
-; AVX-NEXT:    vextractps $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract67_v8i32_add_i32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddd %xmm1, %xmm1
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract67_v8i32_add_i32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: extract_extract67_v8i32_add_i32_commute:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-FAST-NEXT:    vzeroupper
+; AVX1-FAST-NEXT:    retq
+;
+; AVX2-FAST-LABEL: extract_extract67_v8i32_add_i32_commute:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: extract_extract67_v8i32_add_i32_commute:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX512-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-FAST-NEXT:    vzeroupper
+; AVX512-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 6
   %x1 = extractelement <8 x i32> %x, i32 7
   %x01 = add i32 %x1, %x0
@@ -641,22 +812,37 @@ define i16 @extract_extract01_v16i16_add
 }
 
 define i16 @extract_extract23_v16i16_add_i16(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract23_v16i16_add_i16:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pextrw $2, %xmm0, %ecx
-; SSE3-NEXT:    pextrw $3, %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v16i16_add_i16:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pextrw $2, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $3, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v16i16_add_i16:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $2, %xmm0, %ecx
-; AVX-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v16i16_add_i16:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pextrw $1, %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v16i16_add_i16:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpextrw $2, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $3, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v16i16_add_i16:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrw $1, %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <16 x i16> %x, i32 2
   %x1 = extractelement <16 x i16> %x, i32 3
   %x01 = add i16 %x0, %x1
@@ -783,22 +969,37 @@ define i16 @extract_extract01_v16i16_add
 }
 
 define i16 @extract_extract45_v16i16_add_i16_commute(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract45_v16i16_add_i16_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pextrw $4, %xmm0, %ecx
-; SSE3-NEXT:    pextrw $5, %xmm0, %eax
-; SSE3-NEXT:    addl %ecx, %eax
-; SSE3-NEXT:    # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract45_v16i16_add_i16_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pextrw $4, %xmm0, %ecx
+; SSE3-SLOW-NEXT:    pextrw $5, %xmm0, %eax
+; SSE3-SLOW-NEXT:    addl %ecx, %eax
+; SSE3-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract45_v16i16_add_i16_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $4, %xmm0, %ecx
-; AVX-NEXT:    vpextrw $5, %xmm0, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract45_v16i16_add_i16_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pextrw $2, %xmm0, %eax
+; SSE3-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract45_v16i16_add_i16_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpextrw $4, %xmm0, %ecx
+; AVX-SLOW-NEXT:    vpextrw $5, %xmm0, %eax
+; AVX-SLOW-NEXT:    addl %ecx, %eax
+; AVX-SLOW-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract45_v16i16_add_i16_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrw $2, %xmm0, %eax
+; AVX-FAST-NEXT:    # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <16 x i16> %x, i32 4
   %x1 = extractelement <16 x i16> %x, i32 5
   %x01 = add i16 %x1, %x0
@@ -922,22 +1123,36 @@ define i32 @extract_extract01_v8i32_sub_
 }
 
 define i32 @extract_extract23_v8i32_sub_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract23_v8i32_sub_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE3-NEXT:    movd %xmm1, %eax
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    subl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract23_v8i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm1, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract23_v8i32_sub_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractps $2, %xmm0, %eax
-; AVX-NEXT:    vextractps $3, %xmm0, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract23_v8i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract23_v8i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %eax
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract23_v8i32_sub_i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 2
   %x1 = extractelement <8 x i32> %x, i32 3
   %x01 = sub i32 %x0, %x1
@@ -945,23 +1160,54 @@ define i32 @extract_extract23_v8i32_sub_
 }
 
 define i32 @extract_extract67_v8i32_sub_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract67_v8i32_sub_i32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE3-NEXT:    movd %xmm0, %eax
-; SSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
-; SSE3-NEXT:    movd %xmm0, %ecx
-; SSE3-NEXT:    subl %ecx, %eax
-; SSE3-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract67_v8i32_sub_i32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE3-SLOW-NEXT:    movd %xmm0, %eax
+; SSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSE3-SLOW-NEXT:    movd %xmm0, %ecx
+; SSE3-SLOW-NEXT:    subl %ecx, %eax
+; SSE3-SLOW-NEXT:    retq
 ;
-; AVX-LABEL: extract_extract67_v8i32_sub_i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vextractps $2, %xmm0, %eax
-; AVX-NEXT:    vextractps $3, %xmm0, %ecx
-; AVX-NEXT:    subl %ecx, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-FAST-LABEL: extract_extract67_v8i32_sub_i32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    phsubd %xmm1, %xmm1
+; SSE3-FAST-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
+; SSE3-FAST-NEXT:    movd %xmm0, %eax
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract67_v8i32_sub_i32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX-SLOW-NEXT:    vextractps $2, %xmm0, %eax
+; AVX-SLOW-NEXT:    vextractps $3, %xmm0, %ecx
+; AVX-SLOW-NEXT:    subl %ecx, %eax
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX1-FAST-LABEL: extract_extract67_v8i32_sub_i32:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1-FAST-NEXT:    vzeroupper
+; AVX1-FAST-NEXT:    retq
+;
+; AVX2-FAST-LABEL: extract_extract67_v8i32_sub_i32:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX2-FAST-NEXT:    vzeroupper
+; AVX2-FAST-NEXT:    retq
+;
+; AVX512-FAST-LABEL: extract_extract67_v8i32_sub_i32:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT:    vphsubd %xmm0, %xmm0, %xmm0
+; AVX512-FAST-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX512-FAST-NEXT:    vzeroupper
+; AVX512-FAST-NEXT:    retq
   %x0 = extractelement <8 x i32> %x, i32 6
   %x1 = extractelement <8 x i32> %x, i32 7
   %x01 = sub i32 %x0, %x1
