[llvm] r350369 - [x86] lower extracted fadd/fsub to horizontal vector math

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 3 15:16:19 PST 2019


Author: spatel
Date: Thu Jan  3 15:16:19 2019
New Revision: 350369

URL: http://llvm.org/viewvc/llvm-project?rev=350369&view=rev
Log:
[x86] lower extracted fadd/fsub to horizontal vector math

This pattern would show up if we fix horizontal reductions to narrow as they
go along, but it is an improvement for code size and/or for targets with fast
horizontal ops (e.g. Jaguar) independent of that.

We need to do this late in lowering so that it does not interfere with other
pattern matching of larger horizontal sequences.

We can extend this to integer ops in a follow-up patch.
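
As a concrete example (this function is taken from the updated haddsub.ll
test below), starting from this IR:

  define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
    %x0 = extractelement <4 x float> %x, i32 0
    %x1 = extractelement <4 x float> %x, i32 1
    %x01 = fadd float %x0, %x1
    ret float %x01
  }

we now emit a single 'haddps %xmm0, %xmm0' on SSE3 targets with fast
horizontal ops (or when optimizing for size) instead of the scalar
'movshdup + addss' sequence.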

Differential Revision: https://reviews.llvm.org/D56011

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
    llvm/trunk/test/CodeGen/X86/haddsub.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=350369&r1=350368&r2=350369&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Jan  3 15:16:19 2019
@@ -540,6 +540,10 @@ X86TargetLowering::X86TargetLowering(con
       // Use ANDPD and ORPD to simulate FCOPYSIGN.
       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
 
+      // These might be better off as horizontal vector ops.
+      setOperationAction(ISD::FADD, VT, Custom);
+      setOperationAction(ISD::FSUB, VT, Custom);
+
       // We don't support sin/cos/fmod
       setOperationAction(ISD::FSIN   , VT, Expand);
       setOperationAction(ISD::FCOS   , VT, Expand);
@@ -18335,6 +18339,63 @@ static bool shouldUseHorizontalOp(bool I
   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
 }
 
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  assert((VT == MVT::f32 || VT == MVT::f64) && "Only expecting float/double");
+
+  // If both operands have other uses, this is probably not profitable.
+  // Horizontal FP add/sub were added with SSE3.
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  if ((!LHS.hasOneUse() && !RHS.hasOneUse()) || !Subtarget.hasSSE3())
+    return Op;
+
+  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      LHS.getOperand(0) != RHS.getOperand(0))
+    return Op;
+
+  if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
+      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
+      !shouldUseHorizontalOp(true, DAG, Subtarget))
+    return Op;
+
+  // Allow commuted 'hadd' ops.
+  // TODO: Allow commuted fsub by negating the result of FHSUB?
+  // TODO: This can be extended to handle other adjacent extract pairs.
+  auto HOpcode = Op.getOpcode() == ISD::FADD ? X86ISD::FHADD : X86ISD::FHSUB;
+  unsigned LExtIndex = LHS.getConstantOperandVal(1);
+  unsigned RExtIndex = RHS.getConstantOperandVal(1);
+  if (LExtIndex == 1 && RExtIndex == 0 && HOpcode == X86ISD::FHADD)
+    std::swap(LExtIndex, RExtIndex);
+  if (LExtIndex != 0 || RExtIndex != 1)
+    return Op;
+
+  SDValue X = LHS.getOperand(0);
+  EVT VecVT = X.getValueType();
+  unsigned BitWidth = VecVT.getSizeInBits();
+  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
+         "Not expecting illegal vector widths here");
+
+  // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
+  // equivalent, so extract the 256/512-bit source op to 128-bit.
+  // This is free: ymm/zmm -> xmm.
+  SDLoc DL(Op);
+  if (BitWidth == 256 || BitWidth == 512)
+    X = extract128BitVector(X, 0, DAG, DL);
+
+  // fadd (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
+  // fadd (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+  // fsub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
+  // The extract of element 0 is free: the scalar result is element 0.
+  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, HOp,
+                     DAG.getIntPtrConstant(0, DL));
+}
+
 /// The only differences between FABS and FNEG are the mask and the logic op.
 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
@@ -26015,6 +26076,8 @@ SDValue X86TargetLowering::LowerOperatio
   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
+  case ISD::FADD:
+  case ISD::FSUB:               return lowerFaddFsub(Op, DAG, Subtarget);
   case ISD::FABS:
   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
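
For reference, here is a sketch (in IR terms, restating the comments in
lowerFaddFsub above) of which extract pairs the lowering converts. Note that
hsub computes element 0 minus element 1, so the commuted fsub form is
deliberately not converted (see the FHSUB TODO) and appears as a negative
test below:

  ; fadd (extractelement %x, 0), (extractelement %x, 1) --> hadd %x, %x
  ; fadd (extractelement %x, 1), (extractelement %x, 0) --> hadd %x, %x
  ; fsub (extractelement %x, 0), (extractelement %x, 1) --> hsub %x, %x
  ; fsub (extractelement %x, 1), (extractelement %x, 0) --> not converted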

Modified: llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-undef.ll?rev=350369&r1=350368&r2=350369&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-undef.ll Thu Jan  3 15:16:19 2019
@@ -84,17 +84,27 @@ define <4 x float> @test3_undef(<4 x flo
 }
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test4_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    addss %xmm1, %xmm0
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: test4_undef:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE-SLOW-LABEL: test4_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test4_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: test4_undef:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: test4_undef:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -103,19 +113,29 @@ define <4 x float> @test4_undef(<4 x flo
 }
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
-; SSE-LABEL: test5_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movapd %xmm0, %xmm1
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE-NEXT:    addsd %xmm0, %xmm1
-; SSE-NEXT:    movapd %xmm1, %xmm0
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: test5_undef:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE-SLOW-LABEL: test5_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test5_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: test5_undef:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: test5_undef:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %vecext = extractelement <2 x double> %a, i32 0
   %vecext1 = extractelement <2 x double> %a, i32 1
   %add = fadd double %vecext, %vecext1
@@ -166,27 +186,48 @@ define <4 x float> @test7_undef(<4 x flo
 }
 
 define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test8_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    addss %xmm0, %xmm1
-; SSE-NEXT:    movaps %xmm0, %xmm2
-; SSE-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT:    addss %xmm2, %xmm0
-; SSE-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT:    movaps %xmm1, %xmm0
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: test8_undef:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm2, %xmm0
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT:    retq
+; SSE-SLOW-LABEL: test8_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm0, %xmm1
+; SSE-SLOW-NEXT:    movaps %xmm0, %xmm2
+; SSE-SLOW-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-SLOW-NEXT:    addss %xmm2, %xmm0
+; SSE-SLOW-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-SLOW-NEXT:    movaps %xmm1, %xmm0
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test8_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    movaps %xmm0, %xmm1
+; SSE-FAST-NEXT:    haddps %xmm0, %xmm1
+; SSE-FAST-NEXT:    movaps %xmm0, %xmm2
+; SSE-FAST-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-FAST-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-FAST-NEXT:    addss %xmm2, %xmm0
+; SSE-FAST-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-FAST-NEXT:    movaps %xmm1, %xmm0
+; SSE-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: test8_undef:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm1
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX-SLOW-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: test8_undef:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm1
+; AVX-FAST-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-FAST-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX-FAST-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-FAST-NEXT:    retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -241,14 +282,21 @@ define <8 x float> @test10_undef(<8 x fl
 }
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
-; SSE-LABEL: test11_undef:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT:    addss %xmm1, %xmm0
-; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE-NEXT:    addss %xmm3, %xmm1
-; SSE-NEXT:    movddup {{.*#+}} xmm1 = xmm1[0,0]
-; SSE-NEXT:    retq
+; SSE-SLOW-LABEL: test11_undef:
+; SSE-SLOW:       # %bb.0:
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-SLOW-NEXT:    addss %xmm3, %xmm1
+; SSE-SLOW-NEXT:    movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-SLOW-NEXT:    retq
+;
+; SSE-FAST-LABEL: test11_undef:
+; SSE-FAST:       # %bb.0:
+; SSE-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE-FAST-NEXT:    haddps %xmm3, %xmm3
+; SSE-FAST-NEXT:    movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT:    retq
 ;
 ; AVX-LABEL: test11_undef:
 ; AVX:       # %bb.0:
@@ -334,23 +382,40 @@ define <16 x float> @test13_v16f32_undef
 ; AVX1-FAST-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX1-FAST-NEXT:    retq
 ;
-; AVX512-LABEL: test13_v16f32_undef:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; AVX512-NEXT:    vaddss %xmm3, %xmm2, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT:    vaddss %xmm2, %xmm0, %xmm2
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX512-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT:    vaddss %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT:    retq
+; AVX512-SLOW-LABEL: test13_v16f32_undef:
+; AVX512-SLOW:       # %bb.0:
+; AVX512-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm1
+; AVX512-SLOW-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT:    vaddss %xmm3, %xmm2, %xmm2
+; AVX512-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX512-SLOW-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT:    vaddss %xmm2, %xmm0, %xmm2
+; AVX512-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-SLOW-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX512-SLOW-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-SLOW-NEXT:    retq
+;
+; AVX512-FAST-LABEL: test13_v16f32_undef:
+; AVX512-FAST:       # %bb.0:
+; AVX512-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm1
+; AVX512-FAST-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT:    vaddss %xmm3, %xmm2, %xmm2
+; AVX512-FAST-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-FAST-NEXT:    vaddss %xmm2, %xmm0, %xmm2
+; AVX512-FAST-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-FAST-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT:    vaddss %xmm0, %xmm2, %xmm0
+; AVX512-FAST-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-FAST-NEXT:    retq
   %vecext = extractelement <16 x float> %a, i32 0
   %vecext1 = extractelement <16 x float> %a, i32 1
   %add1 = fadd float %vecext, %vecext1

Modified: llvm/trunk/test/CodeGen/X86/haddsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub.ll?rev=350369&r1=350368&r2=350369&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub.ll Thu Jan  3 15:16:19 2019
@@ -588,17 +588,27 @@ define <2 x float> @haddps_v2f32(<4 x fl
 ; 128-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fadd float %x0, %x1
@@ -606,17 +616,27 @@ define float @extract_extract_v4f32_fadd
 }
 
 define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fadd float %x1, %x0
@@ -624,19 +644,29 @@ define float @extract_extract_v4f32_fadd
 }
 
 define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
@@ -644,19 +674,29 @@ define double @extract_extract_v2f64_fad
 }
 
 define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
@@ -664,17 +704,27 @@ define double @extract_extract_v2f64_fad
 }
 
 define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    subss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fsub float %x0, %x1
@@ -701,18 +751,28 @@ define float @extract_extract_v4f32_fsub
 }
 
 define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    subsd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
@@ -742,18 +802,29 @@ define double @extract_extract_v2f64_fsu
 ; 256-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fadd float %x0, %x1
@@ -761,18 +832,29 @@ define float @extract_extract_v8f32_fadd
 }
 
 define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fadd float %x1, %x0
@@ -780,20 +862,31 @@ define float @extract_extract_v8f32_fadd
 }
 
 define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
@@ -801,20 +894,31 @@ define double @extract_extract_v4f64_fad
 }
 
 define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
@@ -822,24 +926,37 @@ define double @extract_extract_v4f64_fad
 }
 
 define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    subss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fsub float %x0, %x1
   ret float %x01
 }
 
+; Negative test...or get hoppy and negate the hsub result?
+
 define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
 ; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
 ; SSE3:       # %bb.0:
@@ -861,25 +978,38 @@ define float @extract_extract_v8f32_fsub
 }
 
 define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    subsd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
   ret double %x01
 }
 
+; Negative test...or get hoppy and negate the hsub result?
+
 define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
 ; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
 ; SSE3:       # %bb.0:
@@ -904,18 +1034,29 @@ define double @extract_extract_v4f64_fsu
 ; 512-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fadd_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v16f32_fadd_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <16 x float> %x, i32 0
   %x1 = extractelement <16 x float> %x, i32 1
   %x01 = fadd float %x0, %x1
@@ -923,18 +1064,29 @@ define float @extract_extract_v16f32_fad
 }
 
 define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fadd_f32_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v16f32_fadd_f32_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <16 x float> %x, i32 0
   %x1 = extractelement <16 x float> %x, i32 1
   %x01 = fadd float %x1, %x0
@@ -942,20 +1094,31 @@ define float @extract_extract_v16f32_fad
 }
 
 define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fadd_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f64_fadd_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x double> %x, i32 0
   %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
@@ -963,20 +1126,31 @@ define double @extract_extract_v8f64_fad
 }
 
 define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fadd_f64_commute:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    addsd %xmm0, %xmm1
-; SSE3-NEXT:    movapd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f64_fadd_f64_commute:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x double> %x, i32 0
   %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
@@ -984,18 +1158,29 @@ define double @extract_extract_v8f64_fad
 }
 
 define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fsub_f32:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    subss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v16f32_fsub_f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <16 x float> %x, i32 0
   %x1 = extractelement <16 x float> %x, i32 1
   %x01 = fsub float %x0, %x1
@@ -1023,19 +1208,30 @@ define float @extract_extract_v16f32_fsu
 }
 
 define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fsub_f64:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movapd %xmm0, %xmm1
-; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT:    subsd %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v8f64_fsub_f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    vzeroupper
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vzeroupper
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <8 x double> %x, i32 0
   %x1 = extractelement <8 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
@@ -1066,19 +1262,31 @@ define double @extract_extract_v8f64_fsu
 ; Check output when 1 or both extracts have extra uses.
 
 define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses1:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movss %xmm0, (%rdi)
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses1:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovss %xmm0, (%rdi)
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movss %xmm0, (%rdi)
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    movss %xmm0, (%rdi)
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 0
   store float %x0, float* %p
   %x1 = extractelement <4 x float> %x, i32 1
@@ -1087,19 +1295,32 @@ define float @extract_extract_v4f32_fadd
 }
 
 define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses2:
-; SSE3:       # %bb.0:
-; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT:    movss %xmm1, (%rdi)
-; SSE3-NEXT:    addss %xmm1, %xmm0
-; SSE3-NEXT:    retq
-;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses2:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT:    vmovss %xmm1, (%rdi)
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-SLOW:       # %bb.0:
+; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT:    movss %xmm1, (%rdi)
+; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT:    retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-FAST:       # %bb.0:
+; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT:    movss %xmm1, (%rdi)
+; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT:    retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-SLOW:       # %bb.0:
+; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT:    vmovss %xmm1, (%rdi)
+; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT:    retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vextractps $1, %xmm0, (%rdi)
+; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT:    retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   store float %x1, float* %p



