[llvm] r351093 - [x86] lower extracted add/sub to horizontal vector math
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 14 10:44:03 PST 2019
Author: spatel
Date: Mon Jan 14 10:44:02 2019
New Revision: 351093
URL: http://llvm.org/viewvc/llvm-project?rev=351093&view=rev
Log:
[x86] lower extracted add/sub to horizontal vector math
add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0

This is the integer sibling to D56011.

There's an additional restriction to only do this transform in the
case where we don't have extra extracts from the source vector. Without
that, we can fail to match larger horizontal patterns that are more
beneficial than this minimal case. An improvement to the more general
h-op lowering may allow us to remove the restriction here in a follow-up.
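
For illustration, a minimal C++ sketch of the source shape this matches
(a sketch, not part of the commit; it assumes Clang's vector extensions
and a target where horizontal ops are considered cheap, or optimizing
for size, per shouldUseHorizontalOp):

  typedef int v4si __attribute__((vector_size(16)));

  // X[0] + X[1] becomes add (extractelt X, 0), (extractelt X, 1) in the
  // DAG; with this patch it can select to:  phaddd %xmm0, %xmm0
  //                                         movd   %xmm0, %eax
  int first_pair_sum(v4si X) {
    return X[0] + X[1];
  }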
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll
llvm/trunk/test/CodeGen/X86/phaddsub-undef.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=351093&r1=351092&r2=351093&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jan 14 10:44:02 2019
@@ -1013,6 +1013,12 @@ X86TargetLowering::X86TargetLowering(con
setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
+
+ // These might be better off as horizontal vector ops.
+ setOperationAction(ISD::ADD, MVT::i16, Custom);
+ setOperationAction(ISD::ADD, MVT::i32, Custom);
+ setOperationAction(ISD::SUB, MVT::i16, Custom);
+ setOperationAction(ISD::SUB, MVT::i32, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
@@ -18487,21 +18493,28 @@ static bool shouldUseHorizontalOp(bool I
/// Depending on uarch and/or optimizing for size, we might prefer to use a
/// vector operation in place of the typical scalar operation.
-static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
- const X86Subtarget &Subtarget) {
- MVT VT = Op.getSimpleValueType();
- assert((VT == MVT::f32 || VT == MVT::f64) && "Only expecting float/double");
-
+static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
// If both operands have other uses, this is probably not profitable.
- // Horizontal FP add/sub were added with SSE3.
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
- if ((!LHS.hasOneUse() && !RHS.hasOneUse()) || !Subtarget.hasSSE3())
+ if (!LHS.hasOneUse() && !RHS.hasOneUse())
return Op;
+ // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
+ bool IsFP = Op.getSimpleValueType().isFloatingPoint();
+ if (IsFP && !Subtarget.hasSSE3())
+ return Op;
+ if (!IsFP && !Subtarget.hasSSSE3())
+ return Op;
+
+ // Defer forming the minimal horizontal op if the vector source has more than
+ // the 2 extract element uses that we're matching here. In that case, we might
+ // form a horizontal op that includes more than 1 add/sub op.
if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- LHS.getOperand(0) != RHS.getOperand(0))
+ LHS.getOperand(0) != RHS.getOperand(0) ||
+ !LHS.getOperand(0)->hasNUsesOfValue(2, 0))
return Op;
if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
@@ -18510,13 +18523,23 @@ static SDValue lowerFaddFsub(SDValue Op,
return Op;
// Allow commuted 'hadd' ops.
- // TODO: Allow commuted fsub by negating the result of FHSUB?
- // TODO: This can be extended to handle other adjacent extract pairs.
- auto HOpcode = Op.getOpcode() == ISD::FADD ? X86ISD::FHADD : X86ISD::FHSUB;
+ // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
+ unsigned HOpcode;
+ switch (Op.getOpcode()) {
+ case ISD::ADD: HOpcode = X86ISD::HADD; break;
+ case ISD::SUB: HOpcode = X86ISD::HSUB; break;
+ case ISD::FADD: HOpcode = X86ISD::FHADD; break;
+ case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
+ default:
+ llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
+ }
unsigned LExtIndex = LHS.getConstantOperandVal(1);
unsigned RExtIndex = RHS.getConstantOperandVal(1);
- if (LExtIndex == 1 && RExtIndex == 0 && HOpcode == X86ISD::FHADD)
+ if (LExtIndex == 1 && RExtIndex == 0 &&
+ (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
std::swap(LExtIndex, RExtIndex);
+
+ // TODO: This can be extended to handle other adjacent extract pairs.
if (LExtIndex != 0 || RExtIndex != 1)
return Op;
@@ -18533,15 +18556,23 @@ static SDValue lowerFaddFsub(SDValue Op,
if (BitWidth == 256 || BitWidth == 512)
X = extract128BitVector(X, 0, DAG, DL);
- // fadd (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
- // fadd (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
- // fsub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
- // The extract of element 0 is free: the scalar result is element 0.
+ // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
+ // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+ // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
- return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, HOp,
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
DAG.getIntPtrConstant(0, DL));
}
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ MVT VT = Op.getSimpleValueType();
+ assert((VT == MVT::f32 || VT == MVT::f64) && "Only expecting float/double");
+ return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+}
+
/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
@@ -23479,11 +23510,16 @@ static SDValue split512IntArith(SDValue
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
-static SDValue LowerADD_SUB(SDValue Op, SelectionDAG &DAG) {
+static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
+ if (VT == MVT::i16 || VT == MVT::i32)
+ return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+
if (VT.getScalarType() == MVT::i1)
return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
Op.getOperand(0), Op.getOperand(1));
+
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
@@ -26219,7 +26255,7 @@ SDValue X86TargetLowering::LowerOperatio
case ISD::ADDCARRY:
case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
case ISD::ADD:
- case ISD::SUB: return LowerADD_SUB(Op, DAG);
+ case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
case ISD::UADDSAT:
case ISD::SADDSAT:
case ISD::USUBSAT:
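
To make the commutation handling above concrete, a small sketch (again
assuming Clang's vector extensions; function names are illustrative):
element order 1,0 is swapped back only for the hadd cases, because add
commutes; hsub matches only the 0,1 order.

  typedef int v4si __attribute__((vector_size(16)));

  // Commuted operands still select phaddd: the lowering swaps
  // LExtIndex/RExtIndex when HOpcode is HADD/FHADD.
  int pair_sum_commuted(v4si X) { return X[1] + X[0]; }

  // sub does not commute, so only (elt 0) - (elt 1) maps to phsubd;
  // the commuted form stays scalar (see the TODO about negating HSUB).
  int pair_diff(v4si X) { return X[0] - X[1]; }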
Modified: llvm/trunk/test/CodeGen/X86/haddsub-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/haddsub-undef.ll?rev=351093&r1=351092&r2=351093&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/haddsub-undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/haddsub-undef.ll Mon Jan 14 10:44:02 2019
@@ -186,48 +186,27 @@ define <4 x float> @test7_undef(<4 x flo
}
define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-SLOW-LABEL: test8_undef:
-; SSE-SLOW: # %bb.0:
-; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-SLOW-NEXT: addss %xmm0, %xmm1
-; SSE-SLOW-NEXT: movaps %xmm0, %xmm2
-; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-SLOW-NEXT: addss %xmm2, %xmm0
-; SSE-SLOW-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-SLOW-NEXT: movaps %xmm1, %xmm0
-; SSE-SLOW-NEXT: retq
-;
-; SSE-FAST-LABEL: test8_undef:
-; SSE-FAST: # %bb.0:
-; SSE-FAST-NEXT: movaps %xmm0, %xmm1
-; SSE-FAST-NEXT: haddps %xmm0, %xmm1
-; SSE-FAST-NEXT: movaps %xmm0, %xmm2
-; SSE-FAST-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-FAST-NEXT: addss %xmm2, %xmm0
-; SSE-FAST-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-FAST-NEXT: movaps %xmm1, %xmm0
-; SSE-FAST-NEXT: retq
+; SSE-LABEL: test8_undef:
+; SSE: # %bb.0:
+; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-NEXT: addss %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-NEXT: addss %xmm2, %xmm0
+; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
-; AVX-SLOW-LABEL: test8_undef:
-; AVX-SLOW: # %bb.0:
-; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-SLOW-NEXT: retq
-;
-; AVX-FAST-LABEL: test8_undef:
-; AVX-FAST: # %bb.0:
-; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
-; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-FAST-NEXT: retq
+; AVX-LABEL: test8_undef:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
%vecext = extractelement <4 x float> %a, i32 0
%vecext1 = extractelement <4 x float> %a, i32 1
%add = fadd float %vecext, %vecext1
@@ -382,40 +361,23 @@ define <16 x float> @test13_v16f32_undef
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
-; AVX512-SLOW-LABEL: test13_v16f32_undef:
-; AVX512-SLOW: # %bb.0:
-; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; AVX512-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX512-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm2
-; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-SLOW-NEXT: retq
-;
-; AVX512-FAST-LABEL: test13_v16f32_undef:
-; AVX512-FAST: # %bb.0:
-; AVX512-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
-; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; AVX512-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX512-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm2
-; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-FAST-NEXT: retq
+; AVX512-LABEL: test13_v16f32_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-NEXT: retq
%vecext = extractelement <16 x float> %a, i32 0
%vecext1 = extractelement <16 x float> %a, i32 1
%add1 = fadd float %vecext, %vecext1
Modified: llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll?rev=351093&r1=351092&r2=351093&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub-extract.ll Mon Jan 14 10:44:02 2019
@@ -11,20 +11,32 @@
; 128-bit vectors, 16/32-bit, add/sub
define i32 @extract_extract_v4i32_add_i32(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract_v4i32_add_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4i32_add_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 0
%x1 = extractelement <4 x i32> %x, i32 1
%x01 = add i32 %x0, %x1
@@ -32,20 +44,32 @@ define i32 @extract_extract_v4i32_add_i3
}
define i32 @extract_extract_v4i32_add_i32_commute(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract_v4i32_add_i32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4i32_add_i32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 0
%x1 = extractelement <4 x i32> %x, i32 1
%x01 = add i32 %x1, %x0
@@ -53,21 +77,35 @@ define i32 @extract_extract_v4i32_add_i3
}
define i16 @extract_extract_v8i16_add_i16(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract_v8i16_add_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i16_add_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_add_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i16> %x, i32 0
%x1 = extractelement <8 x i16> %x, i32 1
%x01 = add i16 %x0, %x1
@@ -75,21 +113,35 @@ define i16 @extract_extract_v8i16_add_i1
}
define i16 @extract_extract_v8i16_add_i16_commute(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract_v8i16_add_i16_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i16_add_i16_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_add_i16_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i16> %x, i32 0
%x1 = extractelement <8 x i16> %x, i32 1
%x01 = add i16 %x1, %x0
@@ -97,20 +149,32 @@ define i16 @extract_extract_v8i16_add_i1
}
define i32 @extract_extract_v4i32_sub_i32(<4 x i32> %x) {
-; SSE3-LABEL: extract_extract_v4i32_sub_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4i32_sub_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_sub_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 0
%x1 = extractelement <4 x i32> %x, i32 1
%x01 = sub i32 %x0, %x1
@@ -139,21 +203,35 @@ define i32 @extract_extract_v4i32_sub_i3
}
define i16 @extract_extract_v8i16_sub_i16(<8 x i16> %x) {
-; SSE3-LABEL: extract_extract_v8i16_sub_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pextrw $1, %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i16_sub_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrw $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i16_sub_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i16> %x, i32 0
%x1 = extractelement <8 x i16> %x, i32 1
%x01 = sub i16 %x0, %x1
@@ -185,21 +263,34 @@ define i16 @extract_extract_v8i16_sub_i1
; 256-bit vectors, i32/i16, add/sub
define i32 @extract_extract_v8i32_add_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract_v8i32_add_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i32_add_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_add_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i32> %x, i32 0
%x1 = extractelement <8 x i32> %x, i32 1
%x01 = add i32 %x0, %x1
@@ -207,21 +298,34 @@ define i32 @extract_extract_v8i32_add_i3
}
define i32 @extract_extract_v8i32_add_i32_commute(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract_v8i32_add_i32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i32_add_i32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_add_i32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i32> %x, i32 0
%x1 = extractelement <8 x i32> %x, i32 1
%x01 = add i32 %x1, %x0
@@ -229,22 +333,37 @@ define i32 @extract_extract_v8i32_add_i3
}
define i16 @extract_extract_v16i16_add_i16(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract_v16i16_add_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i16_add_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_add_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i16> %x, i32 0
%x1 = extractelement <16 x i16> %x, i32 1
%x01 = add i16 %x0, %x1
@@ -252,22 +371,37 @@ define i16 @extract_extract_v16i16_add_i
}
define i16 @extract_extract_v16i16_add_i16_commute(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract_v16i16_add_i16_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i16_add_i16_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_add_i16_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i16> %x, i32 0
%x1 = extractelement <16 x i16> %x, i32 1
%x01 = add i16 %x1, %x0
@@ -275,21 +409,34 @@ define i16 @extract_extract_v16i16_add_i
}
define i32 @extract_extract_v8i32_sub_i32(<8 x i32> %x) {
-; SSE3-LABEL: extract_extract_v8i32_sub_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v8i32_sub_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8i32_sub_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <8 x i32> %x, i32 0
%x1 = extractelement <8 x i32> %x, i32 1
%x01 = sub i32 %x0, %x1
@@ -321,22 +468,37 @@ define i32 @extract_extract_v8i32_sub_i3
}
define i16 @extract_extract_v16i16_sub_i16(<16 x i16> %x) {
-; SSE3-LABEL: extract_extract_v16i16_sub_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pextrw $1, %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i16_sub_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrw $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i16_sub_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i16> %x, i32 0
%x1 = extractelement <16 x i16> %x, i32 1
%x01 = sub i16 %x0, %x1
@@ -371,21 +533,34 @@ define i16 @extract_extract_v16i16_sub_i
; 512-bit vectors, i32/i16, add/sub
define i32 @extract_extract_v16i32_add_i32(<16 x i32> %x) {
-; SSE3-LABEL: extract_extract_v16i32_add_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i32_add_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_add_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i32> %x, i32 0
%x1 = extractelement <16 x i32> %x, i32 1
%x01 = add i32 %x0, %x1
@@ -393,21 +568,34 @@ define i32 @extract_extract_v16i32_add_i
}
define i32 @extract_extract_v16i32_add_i32_commute(<16 x i32> %x) {
-; SSE3-LABEL: extract_extract_v16i32_add_i32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i32_add_i32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_add_i32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i32> %x, i32 0
%x1 = extractelement <16 x i32> %x, i32 1
%x01 = add i32 %x1, %x0
@@ -415,22 +603,37 @@ define i32 @extract_extract_v16i32_add_i
}
define i16 @extract_extract_v32i16_add_i16(<32 x i16> %x) {
-; SSE3-LABEL: extract_extract_v32i16_add_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v32i16_add_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_add_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <32 x i16> %x, i32 0
%x1 = extractelement <32 x i16> %x, i32 1
%x01 = add i16 %x0, %x1
@@ -438,22 +641,37 @@ define i16 @extract_extract_v32i16_add_i
}
define i16 @extract_extract_v32i16_add_i16_commute(<32 x i16> %x) {
-; SSE3-LABEL: extract_extract_v32i16_add_i16_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pextrw $1, %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v32i16_add_i16_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phaddw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_add_i16_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphaddw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <32 x i16> %x, i32 0
%x1 = extractelement <32 x i16> %x, i32 1
%x01 = add i16 %x1, %x0
@@ -461,21 +679,34 @@ define i16 @extract_extract_v32i16_add_i
}
define i32 @extract_extract_v16i32_sub_i32(<16 x i32> %x) {
-; SSE3-LABEL: extract_extract_v16i32_sub_i32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v16i32_sub_i32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16i32_sub_i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <16 x i32> %x, i32 0
%x1 = extractelement <16 x i32> %x, i32 1
%x01 = sub i32 %x0, %x1
@@ -505,22 +736,37 @@ define i32 @extract_extract_v16i32_sub_i
}
define i16 @extract_extract_v32i16_sub_i16(<32 x i16> %x) {
-; SSE3-LABEL: extract_extract_v32i16_sub_i16:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: pextrw $1, %xmm0, %ecx
-; SSE3-NEXT: subl %ecx, %eax
-; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v32i16_sub_i16:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: vpextrw $1, %xmm0, %ecx
-; AVX-NEXT: subl %ecx, %eax
-; AVX-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: pextrw $1, %xmm0, %ecx
+; SSE3-SLOW-NEXT: subl %ecx, %eax
+; SSE3-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: phsubw %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX-SLOW-NEXT: vpextrw $1, %xmm0, %ecx
+; AVX-SLOW-NEXT: subl %ecx, %eax
+; AVX-SLOW-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v32i16_sub_i16:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vphsubw %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: # kill: def $ax killed $ax killed $eax
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
%x0 = extractelement <32 x i16> %x, i32 0
%x1 = extractelement <32 x i16> %x, i32 1
%x01 = sub i16 %x0, %x1
@@ -553,22 +799,36 @@ define i16 @extract_extract_v32i16_sub_i
; Check output when 1 or both extracts have extra uses.
define i32 @extract_extract_v4i32_add_i32_uses1(<4 x i32> %x, i32* %p) {
-; SSE3-LABEL: extract_extract_v4i32_add_i32_uses1:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: movd %xmm0, (%rdi)
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4i32_add_i32_uses1:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vmovd %xmm0, (%rdi)
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: movd %xmm0, (%rdi)
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movd %xmm0, (%rdi)
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vmovd %xmm0, (%rdi)
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses1:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vmovd %xmm0, (%rdi)
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 0
store i32 %x0, i32* %p
%x1 = extractelement <4 x i32> %x, i32 1
@@ -577,22 +837,37 @@ define i32 @extract_extract_v4i32_add_i3
}
define i32 @extract_extract_v4i32_add_i32_uses2(<4 x i32> %x, i32* %p) {
-; SSE3-LABEL: extract_extract_v4i32_add_i32_uses2:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movd %xmm0, %ecx
-; SSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: addl %ecx, %eax
-; SSE3-NEXT: movd %xmm0, (%rdi)
-; SSE3-NEXT: retq
-;
-; AVX-LABEL: extract_extract_v4i32_add_i32_uses2:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovd %xmm0, %ecx
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: addl %ecx, %eax
-; AVX-NEXT: vpextrd $1, %xmm0, (%rdi)
-; AVX-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movd %xmm0, %ecx
+; SSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE3-SLOW-NEXT: movd %xmm0, %eax
+; SSE3-SLOW-NEXT: addl %ecx, %eax
+; SSE3-SLOW-NEXT: movd %xmm0, (%rdi)
+; SSE3-SLOW-NEXT: retq
+;
+; SSE3-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE3-FAST-NEXT: movd %xmm1, (%rdi)
+; SSE3-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE3-FAST-NEXT: movd %xmm0, %eax
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovd %xmm0, %ecx
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, %eax
+; AVX-SLOW-NEXT: addl %ecx, %eax
+; AVX-SLOW-NEXT: vpextrd $1, %xmm0, (%rdi)
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4i32_add_i32_uses2:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vpextrd $1, %xmm0, (%rdi)
+; AVX-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vmovd %xmm0, %eax
+; AVX-FAST-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 0
%x1 = extractelement <4 x i32> %x, i32 1
store i32 %x1, i32* %p
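
One note on the uses1/uses2 tests above, reading from the new lowering:
the profitability bailout gives up only when both extracts have
additional uses, so a single extra store of one element still allows
the horizontal op to form. A sketch of a source shape that keeps the
transform alive (assuming Clang's vector extensions):

  typedef int v4si __attribute__((vector_size(16)));

  // One extra use (the store) does not block the transform; the FAST
  // output above is a movd store followed by phaddd.
  int pair_sum_uses1(v4si X, int *p) {
    *p = X[0];          // extra use of element 0
    return X[0] + X[1];
  }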
Modified: llvm/trunk/test/CodeGen/X86/phaddsub-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/phaddsub-undef.ll?rev=351093&r1=351092&r2=351093&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/phaddsub-undef.ll (original)
+++ llvm/trunk/test/CodeGen/X86/phaddsub-undef.ll Mon Jan 14 10:44:02 2019
@@ -43,35 +43,51 @@ define <8 x i32> @test14_undef(<8 x i32>
; integer horizontal adds instead of two scalar adds followed by vector inserts.
define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
-; SSE-LABEL: test15_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE-NEXT: movd %xmm0, %ecx
-; SSE-NEXT: addl %eax, %ecx
-; SSE-NEXT: movd %xmm3, %eax
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
-; SSE-NEXT: movd %xmm0, %edx
-; SSE-NEXT: addl %eax, %edx
-; SSE-NEXT: movd %ecx, %xmm0
-; SSE-NEXT: movd %edx, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test15_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movd %xmm0, %eax
+; SSE-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-SLOW-NEXT: movd %xmm0, %ecx
+; SSE-SLOW-NEXT: addl %eax, %ecx
+; SSE-SLOW-NEXT: movd %xmm3, %eax
+; SSE-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,3]
+; SSE-SLOW-NEXT: movd %xmm0, %edx
+; SSE-SLOW-NEXT: addl %eax, %edx
+; SSE-SLOW-NEXT: movd %ecx, %xmm0
+; SSE-SLOW-NEXT: movd %edx, %xmm1
+; SSE-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; SSE-SLOW-NEXT: retq
;
-; AVX1-LABEL: test15_undef:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: vpextrd $1, %xmm0, %edx
-; AVX1-NEXT: addl %eax, %edx
-; AVX1-NEXT: vmovd %ecx, %xmm0
-; AVX1-NEXT: vmovd %edx, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
+; SSE-FAST-LABEL: test15_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: phaddd %xmm0, %xmm0
+; SSE-FAST-NEXT: phaddd %xmm3, %xmm3
+; SSE-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
+; SSE-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test15_undef:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT: vpextrd $1, %xmm0, %ecx
+; AVX1-SLOW-NEXT: addl %eax, %ecx
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm0
+; AVX1-SLOW-NEXT: vmovd %xmm0, %eax
+; AVX1-SLOW-NEXT: vpextrd $1, %xmm0, %edx
+; AVX1-SLOW-NEXT: addl %eax, %edx
+; AVX1-SLOW-NEXT: vmovd %ecx, %xmm0
+; AVX1-SLOW-NEXT: vmovd %edx, %xmm1
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test15_undef:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test15_undef:
; AVX2: # %bb.0:
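
A rough C++ rendering of the test15_undef pattern above (assuming
Clang's vector extensions; the IR test leaves the untouched lanes
undef, while this sketch zeroes them):

  typedef int v8si __attribute__((vector_size(32)));

  // Two independent pair sums inserted into a result vector: with fast
  // horizontal ops this now selects a phaddd for each source half
  // instead of scalar adds followed by vector inserts.
  v8si pair_sums(v8si a, v8si b) {
    v8si r = {};
    r[0] = a[0] + a[1];
    r[4] = b[4] + b[5];
    return r;
  }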