[llvm] [DAG] Add legalization handling for AVGCEIL/AVGFLOOR nodes (PR #92096)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 6 03:09:36 PDT 2024
https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/92096
>From 2edfa3567817006362f6ba23e082952b4b252693 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 14 May 2024 12:12:23 +0100
Subject: [PATCH] [DAG] Add legalization handling for AVGCEIL/AVGFLOOR nodes
Always match AVG patterns pre-legalization, and use TargetLowering::expandAVG to expand them again during legalization if the target doesn't support them.
I've removed the X86 custom AVGCEILU pattern detection and replaced it with combines that try to convert other AVG nodes to AVGCEILU.
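For reference, the expansion is built on the standard overflow-free halving identities. A minimal scalar sketch (the helper names here are illustrative, not part of the patch):

#include <cstdint>

// floor((x + y) / 2) without overflowing the add:
// x + y == 2 * (x & y) + (x ^ y), so halve each part separately.
uint8_t avgflooru(uint8_t x, uint8_t y) {
  return uint8_t((x & y) + ((x ^ y) >> 1));
}

// ceil((x + y) / 2): x | y == (x & y) + (x ^ y), so subtracting the
// floored half of the xor leaves the rounded-up average.
uint8_t avgceilu(uint8_t x, uint8_t y) {
  return uint8_t((x | y) - ((x ^ y) >> 1));
}

The signed variants are identical except that the xor term is shifted arithmetically (SRA) rather than logically (SRL).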
---
llvm/include/llvm/CodeGen/TargetLowering.h | 5 +
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 23 +-
llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 7 +
.../SelectionDAG/LegalizeIntegerTypes.cpp | 14 +
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 +
.../SelectionDAG/LegalizeVectorOps.cpp | 13 +
.../SelectionDAG/LegalizeVectorTypes.cpp | 8 +
.../CodeGen/SelectionDAG/TargetLowering.cpp | 58 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 198 +-
llvm/test/CodeGen/AArch64/arm64-vhadd.ll | 32 +-
llvm/test/CodeGen/AArch64/sve-hadd.ll | 108 +-
.../CodeGen/Thumb2/mve-laneinterleaving.ll | 86 +-
llvm/test/CodeGen/X86/avg.ll | 1101 ++---
llvm/test/CodeGen/X86/avgceils.ll | 3960 ++++-------------
llvm/test/CodeGen/X86/avgceilu.ll | 1789 ++------
llvm/test/CodeGen/X86/avgfloors.ll | 3522 ++++-----------
llvm/test/CodeGen/X86/avgflooru.ll | 2539 +++--------
.../CodeGen/X86/min-legal-vector-width.ll | 8 +-
18 files changed, 3506 insertions(+), 9966 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index d1912b1c4c0f6..9d8d0468d475c 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -5344,6 +5344,11 @@ class TargetLowering : public TargetLoweringBase {
/// \returns The expansion result or SDValue() if it fails.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;
+ /// Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
+ /// \param N Node to expand
+ /// \returns The expansion result or SDValue() if it fails.
+ SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const;
+
/// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
/// scalar types. Returns SDValue() if expand fails.
/// \param N Node to expand
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 02cd125eeff09..6d120a832d7d8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2574,13 +2574,13 @@ SDValue DAGCombiner::foldSubToAvg(SDNode *N, const SDLoc &DL) {
EVT VT = N0.getValueType();
SDValue A, B;
- if (hasOperation(ISD::AVGCEILU, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::AVGCEILU, VT)) &&
sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)),
m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
m_SpecificInt(1))))) {
return DAG.getNode(ISD::AVGCEILU, DL, VT, A, B);
}
- if (hasOperation(ISD::AVGCEILS, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::AVGCEILS, VT)) &&
sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)),
m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)),
m_SpecificInt(1))))) {
@@ -2946,13 +2946,13 @@ SDValue DAGCombiner::foldAddToAvg(SDNode *N, const SDLoc &DL) {
EVT VT = N0.getValueType();
SDValue A, B;
- if (hasOperation(ISD::AVGFLOORU, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::AVGFLOORU, VT)) &&
sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)),
m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)),
m_SpecificInt(1))))) {
return DAG.getNode(ISD::AVGFLOORU, DL, VT, A, B);
}
- if (hasOperation(ISD::AVGFLOORS, VT) &&
+ if ((!LegalOperations || hasOperation(ISD::AVGFLOORS, VT)) &&
sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)),
m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)),
m_SpecificInt(1))))) {
@@ -5235,6 +5235,21 @@ SDValue DAGCombiner::visitAVG(SDNode *N) {
return DAG.getNode(ISD::SRL, DL, VT, X,
DAG.getShiftAmountConstant(1, VT, DL));
+ // Fold avgflooru(x,y) -> avgceilu(x,y-1) iff y != 0
+ // Fold avgflooru(x,y) -> avgceilu(x-1,y) iff x != 0
+ // Check if avgflooru isn't legal/custom but avgceilu is.
+ if (Opcode == ISD::AVGFLOORU && !hasOperation(ISD::AVGFLOORU, VT) &&
+ (!LegalOperations || hasOperation(ISD::AVGCEILU, VT))) {
+ if (DAG.isKnownNeverZero(N1))
+ return DAG.getNode(
+ ISD::AVGCEILU, DL, VT, N0,
+ DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getAllOnesConstant(DL, VT)));
+ if (DAG.isKnownNeverZero(N0))
+ return DAG.getNode(
+ ISD::AVGCEILU, DL, VT, N1,
+ DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getAllOnesConstant(DL, VT)));
+ }
+
return SDValue();
}
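This fold is sound because, for unsigned x and y with y != 0, floor((x + y) / 2) == (x + (y - 1) + 1) / 2 == avgceilu(x, y - 1); the isKnownNeverZero guards are what keep the decrement from wrapping. An illustrative check (not part of the patch), done in widened arithmetic so the adds themselves can't wrap:

#include <cassert>
#include <cstdint>

void checkAvgFloorToCeil(uint32_t x, uint32_t y) {
  assert(y != 0 && "decrement would wrap");
  uint64_t Floor = (uint64_t(x) + y) >> 1;          // avgflooru(x, y)
  uint64_t Ceil = (uint64_t(x) + (y - 1) + 1) >> 1; // avgceilu(x, y - 1)
  assert(Floor == Ceil);
}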
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 27c45cab2e0d0..2c7148d3a7033 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3059,6 +3059,13 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
if ((Tmp1 = TLI.expandABD(Node, DAG)))
Results.push_back(Tmp1);
break;
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
+ if ((Tmp1 = TLI.expandAVG(Node, DAG)))
+ Results.push_back(Tmp1);
+ break;
case ISD::CTPOP:
if ((Tmp1 = TLI.expandCTPOP(Node, DAG)))
Results.push_back(Tmp1);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 12f1d005249d6..f435a363051a9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -188,6 +188,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_SUB:
case ISD::VP_MUL: Res = PromoteIntRes_SimpleIntBinOp(N); break;
+ case ISD::AVGCEILS:
+ case ISD::AVGFLOORS:
case ISD::VP_SMIN:
case ISD::VP_SMAX:
case ISD::SDIV:
@@ -195,6 +197,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VP_SDIV:
case ISD::VP_SREM: Res = PromoteIntRes_SExtIntBinOp(N); break;
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORU:
case ISD::VP_UMIN:
case ISD::VP_UMAX:
case ISD::UDIV:
@@ -2818,6 +2822,11 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SSHLSAT:
case ISD::USHLSAT: ExpandIntRes_SHLSAT(N, Lo, Hi); break;
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU: ExpandIntRes_AVG(N, Lo, Hi); break;
+
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
case ISD::UMULFIX:
@@ -4120,6 +4129,11 @@ void DAGTypeLegalizer::ExpandIntRes_READCOUNTER(SDNode *N, SDValue &Lo,
ReplaceValueWith(SDValue(N, 1), R.getValue(2));
}
+void DAGTypeLegalizer::ExpandIntRes_AVG(SDNode *N, SDValue &Lo, SDValue &Hi) {
+ SDValue Result = TLI.expandAVG(N, DAG);
+ SplitInteger(Result, Lo, Hi);
+}
+
void DAGTypeLegalizer::ExpandIntRes_ADDSUBSAT(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDValue Result = TLI.expandAddSubSat(N, DAG);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 2350b562a0343..82c39f46137da 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -479,6 +479,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_SADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_XMULO (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_AVG (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_ADDSUBSAT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SHLSAT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MULFIX (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 042684a434fd5..3097720664c53 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -369,6 +369,10 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::ABS:
case ISD::ABDS:
case ISD::ABDU:
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
case ISD::BSWAP:
case ISD::BITREVERSE:
case ISD::CTLZ:
@@ -918,6 +922,15 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
return;
}
break;
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
+ if (SDValue Expanded = TLI.expandAVG(Node, DAG)) {
+ Results.push_back(Expanded);
+ return;
+ }
+ break;
case ISD::BITREVERSE:
ExpandBITREVERSE(Node, Results);
return;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 92ce3b17ed6c9..328f26c65ac76 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -126,6 +126,10 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
break;
case ISD::ADD:
case ISD::AND:
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
case ISD::FADD:
case ISD::FCOPYSIGN:
case ISD::FDIV:
@@ -1173,6 +1177,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::MUL: case ISD::VP_MUL:
case ISD::MULHS:
case ISD::MULHU:
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
case ISD::FADD: case ISD::VP_FADD:
case ISD::FSUB: case ISD::VP_FSUB:
case ISD::FMUL: case ISD::VP_FMUL:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f856c8a51984e..6e82c36652164 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -951,11 +951,11 @@ SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
// Attempt to form ext(avgfloor(A, B)) from shr(add(ext(A), ext(B)), 1).
// or to form ext(avgceil(A, B)) from shr(add(ext(A), ext(B), 1), 1).
-static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
+static SDValue combineShiftToAVG(SDValue Op,
+ TargetLowering::TargetLoweringOpt &TLO,
const TargetLowering &TLI,
const APInt &DemandedBits,
- const APInt &DemandedElts,
- unsigned Depth) {
+ const APInt &DemandedElts, unsigned Depth) {
assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
"SRL or SRA node is required here!");
// Is the right shift using an immediate value of 1?
@@ -1006,6 +1006,7 @@ static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
// If the shift is unsigned (srl):
// - Needs >= 1 zero bit for both operands.
// - Needs 1 demanded bit zero and >= 2 sign bits.
+ SelectionDAG &DAG = TLO.DAG;
unsigned ShiftOpc = Op.getOpcode();
bool IsSigned = false;
unsigned KnownBits;
@@ -1061,10 +1062,10 @@ static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
EVT NVT = EVT::getIntegerVT(*DAG.getContext(), llvm::bit_ceil(MinWidth));
if (VT.isVector())
NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
- if (!TLI.isOperationLegalOrCustom(AVGOpc, NVT)) {
+ if (TLO.LegalOperations() && !TLI.isOperationLegal(AVGOpc, NVT)) {
// If we could not transform, and (both) adds are nuw/nsw, we can use the
// larger type size to do the transform.
- if (!TLI.isOperationLegalOrCustom(AVGOpc, VT))
+ if (TLO.LegalOperations() && !TLI.isOperationLegal(AVGOpc, VT))
return SDValue();
if (DAG.willNotOverflowAdd(IsSigned, Add.getOperand(0),
Add.getOperand(1)) &&
@@ -2015,7 +2016,7 @@ bool TargetLowering::SimplifyDemandedBits(
}
// Try to match AVG patterns (after shift simplification).
- if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
+ if (SDValue AVG = combineShiftToAVG(Op, TLO, *this, DemandedBits,
DemandedElts, Depth + 1))
return TLO.CombineTo(Op, AVG);
@@ -2127,7 +2128,7 @@ bool TargetLowering::SimplifyDemandedBits(
}
// Try to match AVG patterns (after shift simplification).
- if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits,
+ if (SDValue AVG = combineShiftToAVG(Op, TLO, *this, DemandedBits,
DemandedElts, Depth + 1))
return TLO.CombineTo(Op, AVG);
@@ -9245,6 +9246,49 @@ SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const {
DAG.getNode(ISD::SUB, dl, VT, RHS, LHS));
}
+SDValue TargetLowering::expandAVG(SDNode *N, SelectionDAG &DAG) const {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ unsigned Opc = N->getOpcode();
+ bool IsFloor = Opc == ISD::AVGFLOORS || Opc == ISD::AVGFLOORU;
+ bool IsSigned = Opc == ISD::AVGCEILS || Opc == ISD::AVGFLOORS;
+ unsigned ShiftOpc = IsSigned ? ISD::SRA : ISD::SRL;
+ assert((Opc == ISD::AVGFLOORS || Opc == ISD::AVGCEILS ||
+ Opc == ISD::AVGFLOORU || Opc == ISD::AVGCEILU) &&
+ "Unknown AVG node");
+
+ // If the operands are already extended, we can add+shift.
+ bool IsExt =
+ (IsSigned && DAG.ComputeNumSignBits(LHS) >= 2 &&
+ DAG.ComputeNumSignBits(RHS) >= 2) ||
+ (!IsSigned && DAG.computeKnownBits(LHS).countMinLeadingZeros() >= 1 &&
+ DAG.computeKnownBits(RHS).countMinLeadingZeros() >= 1);
+ if (IsExt) {
+ SDValue Sum = DAG.getNode(ISD::ADD, dl, VT, LHS, RHS);
+ if (!IsFloor)
+ Sum = DAG.getNode(ISD::ADD, dl, VT, Sum, DAG.getConstant(1, dl, VT));
+ return DAG.getNode(ShiftOpc, dl, VT, Sum,
+ DAG.getShiftAmountConstant(1, VT, dl));
+ }
+
+ // avgceils(lhs, rhs) -> sub(or(lhs,rhs),ashr(xor(lhs,rhs),1))
+ // avgceilu(lhs, rhs) -> sub(or(lhs,rhs),lshr(xor(lhs,rhs),1))
+ // avgfloors(lhs, rhs) -> add(and(lhs,rhs),ashr(xor(lhs,rhs),1))
+ // avgflooru(lhs, rhs) -> add(and(lhs,rhs),lshr(xor(lhs,rhs),1))
+ unsigned SumOpc = IsFloor ? ISD::ADD : ISD::SUB;
+ unsigned SignOpc = IsFloor ? ISD::AND : ISD::OR;
+ LHS = DAG.getFreeze(LHS);
+ RHS = DAG.getFreeze(RHS);
+ SDValue Sign = DAG.getNode(SignOpc, dl, VT, LHS, RHS);
+ SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS);
+ SDValue Shift =
+ DAG.getNode(ShiftOpc, dl, VT, Xor, DAG.getShiftAmountConstant(1, VT, dl));
+ return DAG.getNode(SumOpc, dl, VT, Sign, Shift);
+}
+
SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const {
SDLoc dl(N);
EVT VT = N->getValueType(0);
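One note on the IsExt fast path in expandAVG above: when both operands are known to have a spare top bit (at least one leading zero for the unsigned ops, at least two sign bits for the signed ones), the add cannot overflow, so a plain add+shift beats the and/or/xor form. A minimal scalar sketch of that case (the precondition and name are mine, not from the patch):

#include <cstdint>

// Precondition: x, y <= 0x7fff (one known-zero top bit), so x + y + 1
// fits in 16 bits and the rounded-up average is just an add and a shift.
uint16_t avgceiluExtended(uint16_t x, uint16_t y) {
  return uint16_t((x + y + 1) >> 1);
}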
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index af1e45d25aac4..7c9eac7105d75 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2516,6 +2516,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
ISD::SRL,
ISD::OR,
ISD::AND,
+ ISD::AVGCEILS,
+ ISD::AVGCEILU,
+ ISD::AVGFLOORS,
+ ISD::AVGFLOORU,
ISD::BITREVERSE,
ISD::ADD,
ISD::FADD,
@@ -50706,157 +50710,6 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
return SDValue();
}
-/// This function detects the AVG pattern between vectors of unsigned i8/i16,
-/// which is c = (a + b + 1) / 2, and replace this operation with the efficient
-/// ISD::AVGCEILU (AVG) instruction.
-static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
- const X86Subtarget &Subtarget,
- const SDLoc &DL) {
- if (!VT.isVector())
- return SDValue();
- EVT InVT = In.getValueType();
- unsigned NumElems = VT.getVectorNumElements();
-
- EVT ScalarVT = VT.getVectorElementType();
- if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
- return SDValue();
-
- // InScalarVT is the intermediate type in AVG pattern and it should be greater
- // than the original input type (i8/i16).
- EVT InScalarVT = InVT.getVectorElementType();
- if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
- return SDValue();
-
- if (!Subtarget.hasSSE2())
- return SDValue();
-
- // Detect the following pattern:
- //
- // %1 = zext <N x i8> %a to <N x i32>
- // %2 = zext <N x i8> %b to <N x i32>
- // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
- // %4 = add nuw nsw <N x i32> %3, %2
- // %5 = lshr <N x i32> %N, <i32 1 x N>
- // %6 = trunc <N x i32> %5 to <N x i8>
- //
- // In AVX512, the last instruction can also be a trunc store.
- if (In.getOpcode() != ISD::SRL)
- return SDValue();
-
- // A lambda checking the given SDValue is a constant vector and each element
- // is in the range [Min, Max].
- auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
- return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
- return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
- });
- };
-
- auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
- unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
- return MaxActiveBits <= ScalarVT.getSizeInBits();
- };
-
- // Check if each element of the vector is right-shifted by one.
- SDValue LHS = In.getOperand(0);
- SDValue RHS = In.getOperand(1);
- if (!IsConstVectorInRange(RHS, 1, 1))
- return SDValue();
- if (LHS.getOpcode() != ISD::ADD)
- return SDValue();
-
- // Detect a pattern of a + b + 1 where the order doesn't matter.
- SDValue Operands[3];
- Operands[0] = LHS.getOperand(0);
- Operands[1] = LHS.getOperand(1);
-
- auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
- ArrayRef<SDValue> Ops) {
- return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
- };
-
- auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
- for (SDValue &Op : Ops)
- if (Op.getValueType() != VT)
- Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
- // Pad to a power-of-2 vector, split+apply and extract the original vector.
- unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
- EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
- if (NumElemsPow2 != NumElems) {
- for (SDValue &Op : Ops) {
- SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
- for (unsigned i = 0; i != NumElems; ++i) {
- SDValue Idx = DAG.getIntPtrConstant(i, DL);
- EltsOfOp[i] =
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
- }
- Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
- }
- }
- SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
- if (NumElemsPow2 == NumElems)
- return Res;
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
- DAG.getIntPtrConstant(0, DL));
- };
-
- // Take care of the case when one of the operands is a constant vector whose
- // element is in the range [1, 256].
- if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
- IsZExtLike(Operands[0])) {
- // The pattern is detected. Subtract one from the constant vector, then
- // demote it and emit X86ISD::AVG instruction.
- SDValue VecOnes = DAG.getConstant(1, DL, InVT);
- Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
- return AVGSplitter({Operands[0], Operands[1]});
- }
-
- // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
- // Match the or case only if its 'add-like' - can be replaced by an add.
- auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
- if (ISD::ADD == V.getOpcode()) {
- Op0 = V.getOperand(0);
- Op1 = V.getOperand(1);
- return true;
- }
- if (ISD::ZERO_EXTEND != V.getOpcode())
- return false;
- V = V.getOperand(0);
- if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
- !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
- return false;
- Op0 = V.getOperand(0);
- Op1 = V.getOperand(1);
- return true;
- };
-
- SDValue Op0, Op1;
- if (FindAddLike(Operands[0], Op0, Op1))
- std::swap(Operands[0], Operands[1]);
- else if (!FindAddLike(Operands[1], Op0, Op1))
- return SDValue();
- Operands[2] = Op0;
- Operands[1] = Op1;
-
- // Now we have three operands of two additions. Check that one of them is a
- // constant vector with ones, and the other two can be promoted from i8/i16.
- for (SDValue &Op : Operands) {
- if (!IsConstVectorInRange(Op, 1, 1))
- continue;
- std::swap(Op, Operands[2]);
-
- // Check if Operands[0] and Operands[1] are results of type promotion.
- for (int j = 0; j < 2; ++j)
- if (Operands[j].getValueType() != VT)
- if (!IsZExtLike(Operands[j]))
- return SDValue();
-
- // The pattern is detected, emit X86ISD::AVG instruction(s).
- return AVGSplitter({Operands[0], Operands[1]});
- }
-
- return SDValue();
-}
-
static SDValue combineConstantPoolLoads(SDNode *N, const SDLoc &dl,
SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
@@ -51496,16 +51349,6 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
// First, pack all of the elements in one place. Next, store to memory
// in fewer chunks.
if (St->isTruncatingStore() && VT.isVector()) {
- // Check if we can detect an AVG pattern from the truncation. If yes,
- // replace the trunc store by a normal store with the result of X86ISD::AVG
- // instruction.
- if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
- if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
- Subtarget, dl))
- return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
- St->getPointerInfo(), St->getOriginalAlign(),
- St->getMemOperand()->getFlags());
-
if (TLI.isTruncStoreLegal(VT, StVT)) {
if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
@@ -52361,10 +52204,6 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
return V;
- // Try to detect AVG pattern first.
- if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
- return Avg;
-
// Try to detect PMADD
if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
return PMAdd;
@@ -52890,6 +52729,31 @@ static SDValue combineBITREVERSE(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Various combines to try to convert to avgceilu.
+static SDValue combineAVG(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ unsigned Opcode = N->getOpcode();
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N->getValueType(0);
+ EVT SVT = VT.getScalarType();
+ SDLoc DL(N);
+
+ // avgceils(x,y) -> flipsign(avgceilu(flipsign(x),flipsign(y)))
+ // Only useful on vXi8 which doesn't have good SRA handling.
+ if (Opcode == ISD::AVGCEILS && VT.isVector() && SVT == MVT::i8) {
+ APInt SignBit = APInt::getSignMask(VT.getScalarSizeInBits());
+ SDValue SignMask = DAG.getConstant(SignBit, DL, VT);
+ N0 = DAG.getNode(ISD::XOR, DL, VT, N0, SignMask);
+ N1 = DAG.getNode(ISD::XOR, DL, VT, N1, SignMask);
+ return DAG.getNode(ISD::XOR, DL, VT,
+ DAG.getNode(ISD::AVGCEILU, DL, VT, N0, N1), SignMask);
+ }
+
+ return SDValue();
+}
+
static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
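The avgceils combine above works because XORing with the sign bit is an order-preserving map from signed to unsigned, so the signed rounding average can reuse the unsigned one (which x86 has natively as PAVGB). A scalar model of the transform (the helper name is illustrative):

#include <cstdint>

// avgceils(x, y) via avgceilu: x ^ 0x80 biases [-128, 127] onto [0, 255],
// the unsigned rounded average is taken, and the bias is flipped back.
int8_t avgceilsViaUnsigned(int8_t x, int8_t y) {
  uint8_t ux = uint8_t(x) ^ 0x80;
  uint8_t uy = uint8_t(y) ^ 0x80;
  uint8_t r = uint8_t((ux | uy) - ((ux ^ uy) >> 1)); // avgceilu(ux, uy)
  return int8_t(r ^ 0x80);
}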
@@ -57336,6 +57200,10 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
case ISD::BITREVERSE: return combineBITREVERSE(N, DAG, DCI, Subtarget);
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU: return combineAVG(N, DAG, DCI, Subtarget);
case X86ISD::BEXTR:
case X86ISD::BEXTRI: return combineBEXTR(N, DAG, DCI, Subtarget);
case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
diff --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index a8be8bbd193a8..5743956a6aa73 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -810,10 +810,10 @@ define <4 x i64> @hadd32_zext_lsr(<4 x i32> %src1, <4 x i32> %src2) {
define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: hadd8_sext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl.4h v0, v0, #8
; CHECK-NEXT: shl.4h v1, v1, #8
-; CHECK-NEXT: sshr.4h v0, v0, #8
+; CHECK-NEXT: shl.4h v0, v0, #8
; CHECK-NEXT: sshr.4h v1, v1, #8
+; CHECK-NEXT: sshr.4h v0, v0, #8
; CHECK-NEXT: shadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = sext <4 x i8> %src1 to <4 x i16>
@@ -826,8 +826,8 @@ define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
define <4 x i16> @hadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: hadd8_zext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: bic.4h v1, #255, lsl #8
+; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: uhadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -856,8 +856,8 @@ define <4 x i16> @hadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: hadd8_zext_lsr:
; CHECK: // %bb.0:
-; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: bic.4h v1, #255, lsl #8
+; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: uhadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -870,10 +870,10 @@ define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
define <2 x i16> @hadd8x2_sext_asr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: hadd8x2_sext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl.2s v0, v0, #24
; CHECK-NEXT: shl.2s v1, v1, #24
-; CHECK-NEXT: sshr.2s v0, v0, #24
+; CHECK-NEXT: shl.2s v0, v0, #24
; CHECK-NEXT: sshr.2s v1, v1, #24
+; CHECK-NEXT: sshr.2s v0, v0, #24
; CHECK-NEXT: shadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = sext <2 x i8> %src1 to <2 x i16>
@@ -887,8 +887,8 @@ define <2 x i16> @hadd8x2_zext_asr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: hadd8x2_zext_asr:
; CHECK: // %bb.0:
; CHECK-NEXT: movi d2, #0x0000ff000000ff
-; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: and.8b v1, v1, v2
+; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: uhadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -920,8 +920,8 @@ define <2 x i16> @hadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: hadd8x2_zext_lsr:
; CHECK: // %bb.0:
; CHECK-NEXT: movi d2, #0x0000ff000000ff
-; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: and.8b v1, v1, v2
+; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: uhadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -934,10 +934,10 @@ define <2 x i16> @hadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
define <4 x i16> @rhadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: rhadd8_sext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl.4h v0, v0, #8
; CHECK-NEXT: shl.4h v1, v1, #8
-; CHECK-NEXT: sshr.4h v0, v0, #8
+; CHECK-NEXT: shl.4h v0, v0, #8
; CHECK-NEXT: sshr.4h v1, v1, #8
+; CHECK-NEXT: sshr.4h v0, v0, #8
; CHECK-NEXT: srhadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = sext <4 x i8> %src1 to <4 x i16>
@@ -951,8 +951,8 @@ define <4 x i16> @rhadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
define <4 x i16> @rhadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: rhadd8_zext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: bic.4h v1, #255, lsl #8
+; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: urhadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -985,8 +985,8 @@ define <4 x i16> @rhadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
define <4 x i16> @rhadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
; CHECK-LABEL: rhadd8_zext_lsr:
; CHECK: // %bb.0:
-; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: bic.4h v1, #255, lsl #8
+; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: urhadd.4h v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -1000,10 +1000,10 @@ define <4 x i16> @rhadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
define <2 x i16> @rhadd8x2_sext_asr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: rhadd8x2_sext_asr:
; CHECK: // %bb.0:
-; CHECK-NEXT: shl.2s v0, v0, #24
; CHECK-NEXT: shl.2s v1, v1, #24
-; CHECK-NEXT: sshr.2s v0, v0, #24
+; CHECK-NEXT: shl.2s v0, v0, #24
; CHECK-NEXT: sshr.2s v1, v1, #24
+; CHECK-NEXT: sshr.2s v0, v0, #24
; CHECK-NEXT: srhadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = sext <2 x i8> %src1 to <2 x i16>
@@ -1018,8 +1018,8 @@ define <2 x i16> @rhadd8x2_zext_asr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: rhadd8x2_zext_asr:
; CHECK: // %bb.0:
; CHECK-NEXT: movi d2, #0x0000ff000000ff
-; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: and.8b v1, v1, v2
+; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: urhadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -1055,8 +1055,8 @@ define <2 x i16> @rhadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
; CHECK-LABEL: rhadd8x2_zext_lsr:
; CHECK: // %bb.0:
; CHECK-NEXT: movi d2, #0x0000ff000000ff
-; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: and.8b v1, v1, v2
+; CHECK-NEXT: and.8b v0, v0, v2
; CHECK-NEXT: urhadd.2s v0, v0, v1
; CHECK-NEXT: ret
%zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
diff --git a/llvm/test/CodeGen/AArch64/sve-hadd.ll b/llvm/test/CodeGen/AArch64/sve-hadd.ll
index f90aef8daa5dc..3fead88780e7d 100644
--- a/llvm/test/CodeGen/AArch64/sve-hadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-hadd.ll
@@ -89,8 +89,8 @@ define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32
; SVE2-LABEL: hadds_v2i32:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.d
-; SVE2-NEXT: sxtw z0.d, p0/m, z0.d
; SVE2-NEXT: sxtw z1.d, p0/m, z1.d
+; SVE2-NEXT: sxtw z0.d, p0/m, z0.d
; SVE2-NEXT: shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
@@ -129,8 +129,8 @@ define <vscale x 2 x i32> @haddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32
;
; SVE2-LABEL: haddu_v2i32:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.d, z0.d, #0xffffffff
; SVE2-NEXT: and z1.d, z1.d, #0xffffffff
+; SVE2-NEXT: and z0.d, z0.d, #0xffffffff
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
@@ -222,8 +222,8 @@ define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16
; SVE-LABEL: hadds_v2i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.d
-; SVE-NEXT: sxth z0.d, p0/m, z0.d
; SVE-NEXT: sxth z1.d, p0/m, z1.d
+; SVE-NEXT: sxth z0.d, p0/m, z0.d
; SVE-NEXT: add z0.d, z0.d, z1.d
; SVE-NEXT: asr z0.d, z0.d, #1
; SVE-NEXT: ret
@@ -231,8 +231,8 @@ define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16
; SVE2-LABEL: hadds_v2i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.d
-; SVE2-NEXT: sxth z0.d, p0/m, z0.d
; SVE2-NEXT: sxth z1.d, p0/m, z1.d
+; SVE2-NEXT: sxth z0.d, p0/m, z0.d
; SVE2-NEXT: shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
@@ -266,16 +266,16 @@ entry:
define <vscale x 2 x i16> @haddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; SVE-LABEL: haddu_v2i16:
; SVE: // %bb.0: // %entry
-; SVE-NEXT: and z0.d, z0.d, #0xffff
; SVE-NEXT: and z1.d, z1.d, #0xffff
+; SVE-NEXT: and z0.d, z0.d, #0xffff
; SVE-NEXT: add z0.d, z0.d, z1.d
; SVE-NEXT: lsr z0.d, z0.d, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: haddu_v2i16:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.d, z0.d, #0xffff
; SVE2-NEXT: and z1.d, z1.d, #0xffff
+; SVE2-NEXT: and z0.d, z0.d, #0xffff
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
@@ -292,8 +292,8 @@ define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16
; SVE-LABEL: hadds_v4i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.s
-; SVE-NEXT: sxth z0.s, p0/m, z0.s
; SVE-NEXT: sxth z1.s, p0/m, z1.s
+; SVE-NEXT: sxth z0.s, p0/m, z0.s
; SVE-NEXT: add z0.s, z0.s, z1.s
; SVE-NEXT: asr z0.s, z0.s, #1
; SVE-NEXT: ret
@@ -301,8 +301,8 @@ define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16
; SVE2-LABEL: hadds_v4i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.s
-; SVE2-NEXT: sxth z0.s, p0/m, z0.s
; SVE2-NEXT: sxth z1.s, p0/m, z1.s
+; SVE2-NEXT: sxth z0.s, p0/m, z0.s
; SVE2-NEXT: shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
@@ -335,16 +335,16 @@ entry:
define <vscale x 4 x i16> @haddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; SVE-LABEL: haddu_v4i16:
; SVE: // %bb.0: // %entry
-; SVE-NEXT: and z0.s, z0.s, #0xffff
; SVE-NEXT: and z1.s, z1.s, #0xffff
+; SVE-NEXT: and z0.s, z0.s, #0xffff
; SVE-NEXT: add z0.s, z0.s, z1.s
; SVE-NEXT: lsr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: haddu_v4i16:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.s, z0.s, #0xffff
; SVE2-NEXT: and z1.s, z1.s, #0xffff
+; SVE2-NEXT: and z0.s, z0.s, #0xffff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: uhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
@@ -436,8 +436,8 @@ define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s
; SVE-LABEL: hadds_v4i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.s
-; SVE-NEXT: sxtb z0.s, p0/m, z0.s
; SVE-NEXT: sxtb z1.s, p0/m, z1.s
+; SVE-NEXT: sxtb z0.s, p0/m, z0.s
; SVE-NEXT: add z0.s, z0.s, z1.s
; SVE-NEXT: asr z0.s, z0.s, #1
; SVE-NEXT: ret
@@ -445,8 +445,8 @@ define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s
; SVE2-LABEL: hadds_v4i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.s
-; SVE2-NEXT: sxtb z0.s, p0/m, z0.s
; SVE2-NEXT: sxtb z1.s, p0/m, z1.s
+; SVE2-NEXT: sxtb z0.s, p0/m, z0.s
; SVE2-NEXT: shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
@@ -480,16 +480,16 @@ entry:
define <vscale x 4 x i8> @haddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; SVE-LABEL: haddu_v4i8:
; SVE: // %bb.0: // %entry
-; SVE-NEXT: and z0.s, z0.s, #0xff
; SVE-NEXT: and z1.s, z1.s, #0xff
+; SVE-NEXT: and z0.s, z0.s, #0xff
; SVE-NEXT: add z0.s, z0.s, z1.s
; SVE-NEXT: lsr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: haddu_v4i8:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.s, z0.s, #0xff
; SVE2-NEXT: and z1.s, z1.s, #0xff
+; SVE2-NEXT: and z0.s, z0.s, #0xff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: uhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
@@ -506,8 +506,8 @@ define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s
; SVE-LABEL: hadds_v8i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.h
-; SVE-NEXT: sxtb z0.h, p0/m, z0.h
; SVE-NEXT: sxtb z1.h, p0/m, z1.h
+; SVE-NEXT: sxtb z0.h, p0/m, z0.h
; SVE-NEXT: add z0.h, z0.h, z1.h
; SVE-NEXT: asr z0.h, z0.h, #1
; SVE-NEXT: ret
@@ -515,8 +515,8 @@ define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s
; SVE2-LABEL: hadds_v8i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
-; SVE2-NEXT: sxtb z0.h, p0/m, z0.h
; SVE2-NEXT: sxtb z1.h, p0/m, z1.h
+; SVE2-NEXT: sxtb z0.h, p0/m, z0.h
; SVE2-NEXT: shadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
@@ -549,16 +549,16 @@ entry:
define <vscale x 8 x i8> @haddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; SVE-LABEL: haddu_v8i8:
; SVE: // %bb.0: // %entry
-; SVE-NEXT: and z0.h, z0.h, #0xff
; SVE-NEXT: and z1.h, z1.h, #0xff
+; SVE-NEXT: and z0.h, z0.h, #0xff
; SVE-NEXT: add z0.h, z0.h, z1.h
; SVE-NEXT: lsr z0.h, z0.h, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: haddu_v8i8:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.h, z0.h, #0xff
; SVE2-NEXT: and z1.h, z1.h, #0xff
+; SVE2-NEXT: and z0.h, z0.h, #0xff
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: uhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
@@ -739,8 +739,8 @@ define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i3
; SVE2-LABEL: rhadds_v2i32:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.d
-; SVE2-NEXT: sxtw z0.d, p0/m, z0.d
; SVE2-NEXT: sxtw z1.d, p0/m, z1.d
+; SVE2-NEXT: sxtw z0.d, p0/m, z0.d
; SVE2-NEXT: srhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
@@ -787,8 +787,8 @@ define <vscale x 2 x i32> @rhaddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i3
;
; SVE2-LABEL: rhaddu_v2i32:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.d, z0.d, #0xffffffff
; SVE2-NEXT: and z1.d, z1.d, #0xffffffff
+; SVE2-NEXT: and z0.d, z0.d, #0xffffffff
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: urhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
@@ -881,16 +881,24 @@ entry:
}
define <vscale x 2 x i16> @rhadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
-; CHECK-LABEL: rhadds_v2i16:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: mov z2.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: sxth z0.d, p0/m, z0.d
-; CHECK-NEXT: sxth z1.d, p0/m, z1.d
-; CHECK-NEXT: eor z0.d, z0.d, z2.d
-; CHECK-NEXT: sub z0.d, z1.d, z0.d
-; CHECK-NEXT: asr z0.d, z0.d, #1
-; CHECK-NEXT: ret
+; SVE-LABEL: rhadds_v2i16:
+; SVE: // %bb.0: // %entry
+; SVE-NEXT: ptrue p0.d
+; SVE-NEXT: mov z2.d, #-1 // =0xffffffffffffffff
+; SVE-NEXT: sxth z0.d, p0/m, z0.d
+; SVE-NEXT: sxth z1.d, p0/m, z1.d
+; SVE-NEXT: eor z0.d, z0.d, z2.d
+; SVE-NEXT: sub z0.d, z1.d, z0.d
+; SVE-NEXT: asr z0.d, z0.d, #1
+; SVE-NEXT: ret
+;
+; SVE2-LABEL: rhadds_v2i16:
+; SVE2: // %bb.0: // %entry
+; SVE2-NEXT: ptrue p0.d
+; SVE2-NEXT: sxth z1.d, p0/m, z1.d
+; SVE2-NEXT: sxth z0.d, p0/m, z0.d
+; SVE2-NEXT: srhadd z0.d, p0/m, z0.d, z1.d
+; SVE2-NEXT: ret
entry:
%s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
%s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
@@ -936,8 +944,8 @@ define <vscale x 2 x i16> @rhaddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i1
;
; SVE2-LABEL: rhaddu_v2i16:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.d, z0.d, #0xffff
; SVE2-NEXT: and z1.d, z1.d, #0xffff
+; SVE2-NEXT: and z0.d, z0.d, #0xffff
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: urhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
@@ -966,8 +974,8 @@ define <vscale x 4 x i16> @rhadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i1
; SVE2-LABEL: rhadds_v4i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.s
-; SVE2-NEXT: sxth z0.s, p0/m, z0.s
; SVE2-NEXT: sxth z1.s, p0/m, z1.s
+; SVE2-NEXT: sxth z0.s, p0/m, z0.s
; SVE2-NEXT: srhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
@@ -1014,8 +1022,8 @@ define <vscale x 4 x i16> @rhaddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i1
;
; SVE2-LABEL: rhaddu_v4i16:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.s, z0.s, #0xffff
; SVE2-NEXT: and z1.s, z1.s, #0xffff
+; SVE2-NEXT: and z0.s, z0.s, #0xffff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
@@ -1108,16 +1116,24 @@ entry:
}
define <vscale x 4 x i8> @rhadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
-; CHECK-LABEL: rhadds_v4i8:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
-; CHECK-NEXT: sxtb z0.s, p0/m, z0.s
-; CHECK-NEXT: sxtb z1.s, p0/m, z1.s
-; CHECK-NEXT: eor z0.d, z0.d, z2.d
-; CHECK-NEXT: sub z0.s, z1.s, z0.s
-; CHECK-NEXT: asr z0.s, z0.s, #1
-; CHECK-NEXT: ret
+; SVE-LABEL: rhadds_v4i8:
+; SVE: // %bb.0: // %entry
+; SVE-NEXT: ptrue p0.s
+; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
+; SVE-NEXT: sxtb z0.s, p0/m, z0.s
+; SVE-NEXT: sxtb z1.s, p0/m, z1.s
+; SVE-NEXT: eor z0.d, z0.d, z2.d
+; SVE-NEXT: sub z0.s, z1.s, z0.s
+; SVE-NEXT: asr z0.s, z0.s, #1
+; SVE-NEXT: ret
+;
+; SVE2-LABEL: rhadds_v4i8:
+; SVE2: // %bb.0: // %entry
+; SVE2-NEXT: ptrue p0.s
+; SVE2-NEXT: sxtb z1.s, p0/m, z1.s
+; SVE2-NEXT: sxtb z0.s, p0/m, z0.s
+; SVE2-NEXT: srhadd z0.s, p0/m, z0.s, z1.s
+; SVE2-NEXT: ret
entry:
%s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
%s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
@@ -1163,8 +1179,8 @@ define <vscale x 4 x i8> @rhaddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %
;
; SVE2-LABEL: rhaddu_v4i8:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.s, z0.s, #0xff
; SVE2-NEXT: and z1.s, z1.s, #0xff
+; SVE2-NEXT: and z0.s, z0.s, #0xff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
@@ -1193,8 +1209,8 @@ define <vscale x 8 x i8> @rhadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %
; SVE2-LABEL: rhadds_v8i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
-; SVE2-NEXT: sxtb z0.h, p0/m, z0.h
; SVE2-NEXT: sxtb z1.h, p0/m, z1.h
+; SVE2-NEXT: sxtb z0.h, p0/m, z0.h
; SVE2-NEXT: srhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
@@ -1241,8 +1257,8 @@ define <vscale x 8 x i8> @rhaddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %
;
; SVE2-LABEL: rhaddu_v8i8:
; SVE2: // %bb.0: // %entry
-; SVE2-NEXT: and z0.h, z0.h, #0xff
; SVE2-NEXT: and z1.h, z1.h, #0xff
+; SVE2-NEXT: and z0.h, z0.h, #0xff
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: urhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll
index af0920475dbf4..fe28f785623ed 100644
--- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll
@@ -199,7 +199,7 @@ define arm_aapcs_vfpcc <4 x i32> @ext_add_ashr_trunc_i32(<4 x i32> %a, <4 x i32>
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: adcs r1, r5
; CHECK-NEXT: vmov r5, s0
-; CHECK-NEXT: lsrl r2, r1, #1
+; CHECK-NEXT: asrl r2, r1, #1
; CHECK-NEXT: asrs r1, r0, #31
; CHECK-NEXT: adds.w r0, r0, lr
; CHECK-NEXT: adc.w r1, r1, r12
@@ -207,15 +207,15 @@ define arm_aapcs_vfpcc <4 x i32> @ext_add_ashr_trunc_i32(<4 x i32> %a, <4 x i32>
; CHECK-NEXT: adds r6, r5, r3
; CHECK-NEXT: vmov r3, r5, d3
; CHECK-NEXT: vmov.f32 s6, s1
-; CHECK-NEXT: lsrl r0, r1, #1
+; CHECK-NEXT: asrl r0, r1, #1
; CHECK-NEXT: adcs r7, r4
-; CHECK-NEXT: lsrl r6, r7, #1
+; CHECK-NEXT: asrl r6, r7, #1
; CHECK-NEXT: vmov q0[2], q0[0], r6, r2
; CHECK-NEXT: vmov r1, s6
; CHECK-NEXT: adds r6, r1, r3
; CHECK-NEXT: asr.w r2, r1, #31
; CHECK-NEXT: adc.w r1, r2, r5
-; CHECK-NEXT: lsrl r6, r1, #1
+; CHECK-NEXT: asrl r6, r1, #1
; CHECK-NEXT: vmov q0[3], q0[1], r6, r0
; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
entry:
@@ -250,15 +250,13 @@ entry:
define arm_aapcs_vfpcc <16 x i8> @ext_add_ashr_trunc_i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: ext_add_ashr_trunc_i8:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: vmovlb.u8 q2, q1
-; CHECK-NEXT: vmovlb.s8 q3, q0
-; CHECK-NEXT: vmovlt.u8 q1, q1
-; CHECK-NEXT: vmovlt.s8 q0, q0
-; CHECK-NEXT: vadd.i16 q0, q0, q1
-; CHECK-NEXT: vadd.i16 q2, q3, q2
-; CHECK-NEXT: vshr.u16 q1, q0, #1
-; CHECK-NEXT: vshr.u16 q0, q2, #1
-; CHECK-NEXT: vmovnt.i16 q0, q1
+; CHECK-NEXT: vmovlt.u8 q2, q1
+; CHECK-NEXT: vmovlt.s8 q3, q0
+; CHECK-NEXT: vmovlb.u8 q1, q1
+; CHECK-NEXT: vmovlb.s8 q0, q0
+; CHECK-NEXT: vhadd.s16 q2, q3, q2
+; CHECK-NEXT: vhadd.s16 q0, q0, q1
+; CHECK-NEXT: vmovnt.i16 q0, q2
; CHECK-NEXT: bx lr
entry:
%sa = sext <16 x i8> %a to <16 x i16>
@@ -272,50 +270,24 @@ entry:
define arm_aapcs_vfpcc <16 x i8> @ext_add_ashr_trunc_i8i32(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: ext_add_ashr_trunc_i8i32:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, r5, r7, lr}
-; CHECK-NEXT: push {r4, r5, r7, lr}
-; CHECK-NEXT: .pad #112
-; CHECK-NEXT: sub sp, #112
-; CHECK-NEXT: add r1, sp, #16
-; CHECK-NEXT: mov r4, sp
-; CHECK-NEXT: vstrw.32 q1, [r1]
-; CHECK-NEXT: vstrw.32 q0, [r4]
-; CHECK-NEXT: vldrb.u16 q0, [r1, #8]
-; CHECK-NEXT: add r3, sp, #64
-; CHECK-NEXT: add r5, sp, #32
-; CHECK-NEXT: add r0, sp, #80
-; CHECK-NEXT: vstrw.32 q0, [r3]
-; CHECK-NEXT: add r2, sp, #48
-; CHECK-NEXT: vldrb.s16 q0, [r4, #8]
-; CHECK-NEXT: vstrw.32 q0, [r5]
-; CHECK-NEXT: vldrb.u16 q0, [r1]
-; CHECK-NEXT: add r1, sp, #96
-; CHECK-NEXT: vstrw.32 q0, [r0]
-; CHECK-NEXT: vldrb.s16 q0, [r4]
-; CHECK-NEXT: vstrw.32 q0, [r2]
-; CHECK-NEXT: vldrh.u32 q0, [r3, #8]
-; CHECK-NEXT: vldrh.s32 q1, [r5, #8]
-; CHECK-NEXT: vadd.i32 q0, q1, q0
-; CHECK-NEXT: vshr.u32 q0, q0, #1
-; CHECK-NEXT: vstrb.32 q0, [r1, #12]
-; CHECK-NEXT: vldrh.u32 q0, [r3]
-; CHECK-NEXT: vldrh.s32 q1, [r5]
-; CHECK-NEXT: vadd.i32 q0, q1, q0
-; CHECK-NEXT: vshr.u32 q0, q0, #1
-; CHECK-NEXT: vstrb.32 q0, [r1, #8]
-; CHECK-NEXT: vldrh.u32 q0, [r0, #8]
-; CHECK-NEXT: vldrh.s32 q1, [r2, #8]
-; CHECK-NEXT: vadd.i32 q0, q1, q0
-; CHECK-NEXT: vshr.u32 q0, q0, #1
-; CHECK-NEXT: vstrb.32 q0, [r1, #4]
-; CHECK-NEXT: vldrh.u32 q0, [r0]
-; CHECK-NEXT: vldrh.s32 q1, [r2]
-; CHECK-NEXT: vadd.i32 q0, q1, q0
-; CHECK-NEXT: vshr.u32 q0, q0, #1
-; CHECK-NEXT: vstrb.32 q0, [r1]
-; CHECK-NEXT: vldrw.u32 q0, [r1]
-; CHECK-NEXT: add sp, #112
-; CHECK-NEXT: pop {r4, r5, r7, pc}
+; CHECK-NEXT: .pad #48
+; CHECK-NEXT: sub sp, #48
+; CHECK-NEXT: add r0, sp, #16
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: vstrw.32 q1, [r0]
+; CHECK-NEXT: vstrw.32 q0, [r1]
+; CHECK-NEXT: vldrb.u16 q0, [r0, #8]
+; CHECK-NEXT: vldrb.s16 q1, [r1, #8]
+; CHECK-NEXT: add r2, sp, #32
+; CHECK-NEXT: vhadd.s16 q0, q1, q0
+; CHECK-NEXT: vstrb.16 q0, [r2, #8]
+; CHECK-NEXT: vldrb.u16 q0, [r0]
+; CHECK-NEXT: vldrb.s16 q1, [r1]
+; CHECK-NEXT: vhadd.s16 q0, q1, q0
+; CHECK-NEXT: vstrb.16 q0, [r2]
+; CHECK-NEXT: vldrw.u32 q0, [r2]
+; CHECK-NEXT: add sp, #48
+; CHECK-NEXT: bx lr
entry:
%sa = sext <16 x i8> %a to <16 x i32>
%sb = zext <16 x i8> %b to <16 x i32>
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 6687346604adf..698e15384f0ee 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -18,7 +18,7 @@ define void @avg_v4i8(ptr %a, ptr %b) nounwind {
; AVX: # %bb.0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, (%rax)
; AVX-NEXT: retq
%1 = load <4 x i8>, ptr %a
@@ -46,7 +46,7 @@ define void @avg_v8i8(ptr %a, ptr %b) nounwind {
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpavgb %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpavgb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, (%rax)
; AVX-NEXT: retq
%1 = load <8 x i8>, ptr %a
@@ -90,28 +90,28 @@ define void @avg_v16i8(ptr %a, ptr %b) nounwind {
define void @avg_v24i8(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v24i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pavgb (%rdi), %xmm0
-; SSE2-NEXT: pavgb 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: pavgb (%rsi), %xmm0
+; SSE2-NEXT: pavgb 16(%rsi), %xmm1
; SSE2-NEXT: movq %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v24i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
; AVX1-NEXT: vmovq %xmm1, (%rax)
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v24i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovq %xmm1, (%rax)
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
@@ -120,8 +120,8 @@ define void @avg_v24i8(ptr %a, ptr %b) nounwind {
;
; AVX512-LABEL: avg_v24i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovq %xmm1, (%rax)
; AVX512-NEXT: vmovdqu %xmm0, (%rax)
@@ -142,10 +142,10 @@ define void @avg_v24i8(ptr %a, ptr %b) nounwind {
define void @avg_v32i8(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v32i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pavgb (%rdi), %xmm0
-; SSE2-NEXT: pavgb 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: pavgb (%rsi), %xmm0
+; SSE2-NEXT: pavgb 16(%rsi), %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -190,12 +190,12 @@ define void @avg_v32i8(ptr %a, ptr %b) nounwind {
define void @avg_v48i8(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v48i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: pavgb (%rdi), %xmm0
-; SSE2-NEXT: pavgb 16(%rdi), %xmm1
-; SSE2-NEXT: pavgb 32(%rdi), %xmm2
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa 32(%rdi), %xmm2
+; SSE2-NEXT: pavgb (%rsi), %xmm0
+; SSE2-NEXT: pavgb 16(%rsi), %xmm1
+; SSE2-NEXT: pavgb 32(%rsi), %xmm2
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
@@ -203,12 +203,12 @@ define void @avg_v48i8(ptr %a, ptr %b) nounwind {
;
; AVX1-LABEL: avg_v48i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
-; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2
-; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT: vpavgb 32(%rsi), %xmm2, %xmm2
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vmovdqu %xmm2, (%rax)
@@ -216,10 +216,10 @@ define void @avg_v48i8(ptr %a, ptr %b) nounwind {
;
; AVX2-LABEL: avg_v48i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa 32(%rsi), %xmm1
-; AVX2-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX2-NEXT: vpavgb 32(%rsi), %xmm1, %xmm1
; AVX2-NEXT: vmovdqu %xmm1, (%rax)
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
@@ -227,10 +227,10 @@ define void @avg_v48i8(ptr %a, ptr %b) nounwind {
;
; AVX512F-LABEL: avg_v48i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %xmm1
-; AVX512F-NEXT: vpavgb 32(%rdi), %xmm1, %xmm1
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rdi), %xmm1
+; AVX512F-NEXT: vpavgb 32(%rsi), %xmm1, %xmm1
; AVX512F-NEXT: vmovdqu %xmm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vzeroupper
@@ -238,8 +238,8 @@ define void @avg_v48i8(ptr %a, ptr %b) nounwind {
;
; AVX512BW-LABEL: avg_v48i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, (%rax)
; AVX512BW-NEXT: vmovdqu %ymm0, (%rax)
; AVX512BW-NEXT: vzeroupper
@@ -259,14 +259,14 @@ define void @avg_v48i8(ptr %a, ptr %b) nounwind {
define void @avg_v64i8(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v64i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
-; SSE2-NEXT: pavgb (%rdi), %xmm0
-; SSE2-NEXT: pavgb 16(%rdi), %xmm1
-; SSE2-NEXT: pavgb 32(%rdi), %xmm2
-; SSE2-NEXT: pavgb 48(%rdi), %xmm3
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa 32(%rdi), %xmm2
+; SSE2-NEXT: movdqa 48(%rdi), %xmm3
+; SSE2-NEXT: pavgb (%rsi), %xmm0
+; SSE2-NEXT: pavgb 16(%rsi), %xmm1
+; SSE2-NEXT: pavgb 32(%rsi), %xmm2
+; SSE2-NEXT: pavgb 48(%rsi), %xmm3
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
@@ -275,14 +275,14 @@ define void @avg_v64i8(ptr %a, ptr %b) nounwind {
;
; AVX1-LABEL: avg_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
-; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3
-; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
-; AVX1-NEXT: vpavgb 32(%rdi), %xmm2, %xmm2
-; AVX1-NEXT: vpavgb 48(%rdi), %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT: vpavgb 32(%rsi), %xmm2, %xmm2
+; AVX1-NEXT: vpavgb 48(%rsi), %xmm3, %xmm3
; AVX1-NEXT: vmovdqu %xmm3, (%rax)
; AVX1-NEXT: vmovdqu %xmm2, (%rax)
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
@@ -291,10 +291,10 @@ define void @avg_v64i8(ptr %a, ptr %b) nounwind {
;
; AVX2-LABEL: avg_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, (%rax)
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
@@ -343,7 +343,7 @@ define void @avg_v4i16(ptr %a, ptr %b) nounwind {
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, (%rax)
; AVX-NEXT: retq
%1 = load <4 x i16>, ptr %a
@@ -387,10 +387,10 @@ define void @avg_v8i16(ptr %a, ptr %b) nounwind {
define void @avg_v16i16(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v16i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pavgw (%rdi), %xmm0
-; SSE2-NEXT: pavgw 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: pavgw (%rsi), %xmm0
+; SSE2-NEXT: pavgw 16(%rsi), %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -435,14 +435,14 @@ define void @avg_v16i16(ptr %a, ptr %b) nounwind {
define void @avg_v32i16(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v32i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
-; SSE2-NEXT: pavgw (%rdi), %xmm0
-; SSE2-NEXT: pavgw 16(%rdi), %xmm1
-; SSE2-NEXT: pavgw 32(%rdi), %xmm2
-; SSE2-NEXT: pavgw 48(%rdi), %xmm3
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: movdqa 16(%rdi), %xmm1
+; SSE2-NEXT: movdqa 32(%rdi), %xmm2
+; SSE2-NEXT: movdqa 48(%rdi), %xmm3
+; SSE2-NEXT: pavgw (%rsi), %xmm0
+; SSE2-NEXT: pavgw 16(%rsi), %xmm1
+; SSE2-NEXT: pavgw 32(%rsi), %xmm2
+; SSE2-NEXT: pavgw 48(%rsi), %xmm3
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
@@ -451,14 +451,14 @@ define void @avg_v32i16(ptr %a, ptr %b) nounwind {
;
; AVX1-LABEL: avg_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 32(%rsi), %xmm2
-; AVX1-NEXT: vmovdqa 48(%rsi), %xmm3
-; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgw 16(%rdi), %xmm1, %xmm1
-; AVX1-NEXT: vpavgw 32(%rdi), %xmm2, %xmm2
-; AVX1-NEXT: vpavgw 48(%rdi), %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2
+; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3
; AVX1-NEXT: vmovdqu %xmm3, (%rax)
; AVX1-NEXT: vmovdqu %xmm2, (%rax)
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
@@ -467,10 +467,10 @@ define void @avg_v32i16(ptr %a, ptr %b) nounwind {
;
; AVX2-LABEL: avg_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
; AVX2-NEXT: vmovdqu %ymm1, (%rax)
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
@@ -509,50 +509,50 @@ define void @avg_v32i16(ptr %a, ptr %b) nounwind {
define void @avg_v40i16(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: avg_v40i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: movdqa 32(%rsi), %xmm2
-; SSE2-NEXT: movdqa 48(%rsi), %xmm3
-; SSE2-NEXT: pavgw (%rdi), %xmm0
-; SSE2-NEXT: pavgw 16(%rdi), %xmm1
-; SSE2-NEXT: pavgw 32(%rdi), %xmm2
-; SSE2-NEXT: pavgw 48(%rdi), %xmm3
-; SSE2-NEXT: movdqa 64(%rsi), %xmm4
-; SSE2-NEXT: pavgw 64(%rdi), %xmm4
+; SSE2-NEXT: movdqa 64(%rdi), %xmm0
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa 16(%rdi), %xmm2
+; SSE2-NEXT: movdqa 32(%rdi), %xmm3
+; SSE2-NEXT: movdqa 48(%rdi), %xmm4
+; SSE2-NEXT: pavgw (%rsi), %xmm1
+; SSE2-NEXT: pavgw 16(%rsi), %xmm2
+; SSE2-NEXT: pavgw 32(%rsi), %xmm3
+; SSE2-NEXT: pavgw 48(%rsi), %xmm4
+; SSE2-NEXT: pavgw 64(%rsi), %xmm0
+; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: movdqu %xmm4, (%rax)
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: avg_v40i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa 64(%rsi), %xmm0
-; AVX1-NEXT: vpavgw 64(%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa (%rsi), %xmm1
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm2
-; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3
-; AVX1-NEXT: vmovdqa 48(%rsi), %xmm4
-; AVX1-NEXT: vpavgw (%rdi), %xmm1, %xmm1
-; AVX1-NEXT: vpavgw 16(%rdi), %xmm2, %xmm2
-; AVX1-NEXT: vpavgw 32(%rdi), %xmm3, %xmm3
-; AVX1-NEXT: vpavgw 48(%rdi), %xmm4, %xmm4
-; AVX1-NEXT: vmovdqu %xmm4, (%rax)
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2
+; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
+; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
+; AVX1-NEXT: vpavgw 32(%rsi), %xmm2, %xmm2
+; AVX1-NEXT: vpavgw 48(%rsi), %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa 64(%rdi), %xmm4
+; AVX1-NEXT: vpavgw 64(%rsi), %xmm4, %xmm4
; AVX1-NEXT: vmovdqu %xmm3, (%rax)
; AVX1-NEXT: vmovdqu %xmm2, (%rax)
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: vmovdqu %xmm4, (%rax)
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v40i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa 64(%rsi), %xmm2
-; AVX2-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
+; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa 64(%rdi), %xmm2
+; AVX2-NEXT: vpavgw 64(%rsi), %xmm2, %xmm2
; AVX2-NEXT: vmovdqu %xmm2, (%rax)
; AVX2-NEXT: vmovdqu %ymm1, (%rax)
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
@@ -561,12 +561,12 @@ define void @avg_v40i16(ptr %a, ptr %b) nounwind {
;
; AVX512F-LABEL: avg_v40i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa 64(%rsi), %xmm2
-; AVX512F-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa 64(%rdi), %xmm2
+; AVX512F-NEXT: vpavgw 64(%rsi), %xmm2, %xmm2
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vmovdqu %xmm2, (%rax)
@@ -575,10 +575,10 @@ define void @avg_v40i16(ptr %a, ptr %b) nounwind {
;
; AVX512BW-LABEL: avg_v40i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa 64(%rsi), %xmm1
-; AVX512BW-NEXT: vpavgw 64(%rdi), %xmm1, %xmm1
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa 64(%rdi), %xmm1
+; AVX512BW-NEXT: vpavgw 64(%rsi), %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqu %xmm1, (%rax)
; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
@@ -1728,668 +1728,235 @@ define <512 x i8> @avg_v512i8_3(<512 x i8> %a, <512 x i8> %b) nounwind {
ret <512 x i8> %res
}
-; This is not an avg, but its structurally similar and previously caused a crash
+; This is not an avgceilu, but it's structurally similar and previously caused a crash
; because the constants can't be read with APInt::getZExtValue.
define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
; SSE2-LABEL: not_avg_v16i8_wide_constants:
; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: movaps (%rdi), %xmm1
-; SSE2-NEXT: movaps (%rsi), %xmm0
+; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm3
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm5
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm6
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r12d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r13d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSE2-NEXT: addq %rdx, %rbp
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT: addq %rcx, %rdx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r13,%rcx), %r13
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r12,%rcx), %r12
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r15,%rcx), %r15
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r14,%rcx), %r14
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%rbx,%rcx), %rbx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r11,%rcx), %r11
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r10,%rcx), %r10
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r9,%rcx), %r9
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%r8,%rcx), %r8
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%rdi,%rcx), %rdi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%rsi,%rcx), %rsi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: leaq -1(%rax,%rcx), %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm10
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: leaq -1(%rcx,%rax), %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: leaq -1(%rcx,%rax), %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: xorl %ecx, %ecx
-; SSE2-NEXT: addq $-1, %rbp
-; SSE2-NEXT: movl $0, %eax
-; SSE2-NEXT: adcq $-1, %rax
-; SSE2-NEXT: addq $-1, %rdx
-; SSE2-NEXT: adcq $-1, %rcx
-; SSE2-NEXT: shldq $63, %rdx, %rcx
-; SSE2-NEXT: shldq $63, %rbp, %rax
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: shrq %r13
-; SSE2-NEXT: movq %r13, %xmm3
-; SSE2-NEXT: shrq %r12
-; SSE2-NEXT: movq %r12, %xmm2
-; SSE2-NEXT: shrq %r15
-; SSE2-NEXT: movq %r15, %xmm5
-; SSE2-NEXT: shrq %r14
-; SSE2-NEXT: movq %r14, %xmm4
-; SSE2-NEXT: shrq %rbx
-; SSE2-NEXT: movq %rbx, %xmm6
-; SSE2-NEXT: shrq %r11
-; SSE2-NEXT: movq %r11, %xmm7
-; SSE2-NEXT: shrq %r10
-; SSE2-NEXT: movq %r10, %xmm9
-; SSE2-NEXT: shrq %r9
-; SSE2-NEXT: movq %r9, %xmm8
-; SSE2-NEXT: shrq %r8
-; SSE2-NEXT: movq %r8, %xmm11
-; SSE2-NEXT: shrq %rdi
-; SSE2-NEXT: movq %rdi, %xmm12
-; SSE2-NEXT: shrq %rsi
-; SSE2-NEXT: movq %rsi, %xmm13
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shrq %rax
-; SSE2-NEXT: movq %rax, %xmm10
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shrq %rax
-; SSE2-NEXT: movq %rax, %xmm14
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shrq %rax
-; SSE2-NEXT: movq %rax, %xmm15
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pslld $16, %xmm2
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm0, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-NEXT: psllq $48, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,65535,65535,65535,65535]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,0,1,1]
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pandn %xmm4, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; SSE2-NEXT: pslldq {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm12[0,0,0,0]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm8, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
-; SSE2-NEXT: pslldq {{.*#+}} xmm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm10[0,1,2,3,4,5]
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,0,65535,65535]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm15[0,1,0,1]
-; SSE2-NEXT: pand %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm10, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1]
-; SSE2-NEXT: movupd %xmm2, (%rax)
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm11
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm12
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm13
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm14
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: movd %eax, %xmm15
+; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; SSE2-NEXT: decl %eax
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,0,0,0]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,0,0,0]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1]
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; SSE2-NEXT: movapd %xmm4, %xmm5
+; SSE2-NEXT: andpd %xmm1, %xmm5
+; SSE2-NEXT: xorpd %xmm4, %xmm1
+; SSE2-NEXT: psrlw $1, %xmm1
+; SSE2-NEXT: paddw %xmm5, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm12[0,0,0,0]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,0,0,0]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm9[0],xmm2[1]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
+; SSE2-NEXT: movapd %xmm2, %xmm3
+; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: xorpd %xmm2, %xmm0
+; SSE2-NEXT: psrlw $1, %xmm0
+; SSE2-NEXT: paddw %xmm3, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: packuswb %xmm0, %xmm1
+; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: retq
;
; AVX1-LABEL: not_avg_v16i8_wide_constants:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpextrw $4, %xmm0, %eax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrw $6, %xmm0, %ebx
-; AVX1-NEXT: vpextrw $7, %xmm0, %esi
-; AVX1-NEXT: vpextrw $0, %xmm3, %edi
-; AVX1-NEXT: vpextrw $1, %xmm3, %r8d
-; AVX1-NEXT: vpextrw $2, %xmm3, %r9d
-; AVX1-NEXT: vpextrw $3, %xmm3, %r10d
-; AVX1-NEXT: vpextrw $4, %xmm3, %r11d
-; AVX1-NEXT: vpextrw $5, %xmm3, %r14d
-; AVX1-NEXT: vpextrw $6, %xmm3, %r15d
; AVX1-NEXT: vpextrw $7, %xmm3, %edx
-; AVX1-NEXT: vpextrw $1, %xmm0, %eax
-; AVX1-NEXT: vpextrw $0, %xmm0, %r12d
-; AVX1-NEXT: vpextrw $1, %xmm1, %ecx
-; AVX1-NEXT: addq %rax, %rcx
-; AVX1-NEXT: vpextrw $0, %xmm1, %eax
-; AVX1-NEXT: addq %r12, %rax
-; AVX1-NEXT: vpextrw $7, %xmm2, %r12d
-; AVX1-NEXT: leaq -1(%rdx,%r12), %rdx
-; AVX1-NEXT: vpextrw $6, %xmm2, %r12d
-; AVX1-NEXT: leaq -1(%r15,%r12), %rbp
-; AVX1-NEXT: vpextrw $5, %xmm2, %r15d
-; AVX1-NEXT: leaq -1(%r14,%r15), %r13
-; AVX1-NEXT: vpextrw $4, %xmm2, %r14d
-; AVX1-NEXT: leaq -1(%r11,%r14), %r12
-; AVX1-NEXT: vpextrw $3, %xmm2, %r11d
-; AVX1-NEXT: leaq -1(%r10,%r11), %r15
-; AVX1-NEXT: vpextrw $2, %xmm2, %r10d
-; AVX1-NEXT: leaq -1(%r9,%r10), %r14
-; AVX1-NEXT: vpextrw $1, %xmm2, %r9d
-; AVX1-NEXT: leaq -1(%r8,%r9), %r11
-; AVX1-NEXT: vpextrw $0, %xmm2, %r8d
-; AVX1-NEXT: leaq -1(%rdi,%r8), %r10
-; AVX1-NEXT: vpextrw $7, %xmm1, %edi
-; AVX1-NEXT: leaq -1(%rsi,%rdi), %r9
-; AVX1-NEXT: vpextrw $6, %xmm1, %esi
-; AVX1-NEXT: leaq -1(%rbx,%rsi), %r8
-; AVX1-NEXT: vpextrw $5, %xmm1, %esi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX1-NEXT: leaq -1(%rdi,%rsi), %rsi
-; AVX1-NEXT: vpextrw $4, %xmm1, %edi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX1-NEXT: leaq -1(%rbx,%rdi), %rdi
-; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrw $3, %xmm0, %edi
-; AVX1-NEXT: vpextrw $3, %xmm1, %ebx
-; AVX1-NEXT: leaq -1(%rdi,%rbx), %rdi
-; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrw $2, %xmm0, %edi
-; AVX1-NEXT: vpextrw $2, %xmm1, %ebx
-; AVX1-NEXT: leaq -1(%rdi,%rbx), %rdi
-; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: xorl %edi, %edi
-; AVX1-NEXT: addq $-1, %rcx
-; AVX1-NEXT: movl $0, %ebx
-; AVX1-NEXT: adcq $-1, %rbx
-; AVX1-NEXT: addq $-1, %rax
-; AVX1-NEXT: adcq $-1, %rdi
-; AVX1-NEXT: shldq $63, %rax, %rdi
-; AVX1-NEXT: shldq $63, %rcx, %rbx
-; AVX1-NEXT: shrq %rdx
-; AVX1-NEXT: vmovq %rdx, %xmm0
-; AVX1-NEXT: shrq %rbp
-; AVX1-NEXT: vmovq %rbp, %xmm1
-; AVX1-NEXT: shrq %r13
-; AVX1-NEXT: vmovq %r13, %xmm2
-; AVX1-NEXT: shrq %r12
-; AVX1-NEXT: vmovq %r12, %xmm3
-; AVX1-NEXT: shrq %r15
-; AVX1-NEXT: vmovq %r15, %xmm4
-; AVX1-NEXT: shrq %r14
-; AVX1-NEXT: vmovq %r14, %xmm5
-; AVX1-NEXT: shrq %r11
-; AVX1-NEXT: vmovq %r11, %xmm6
-; AVX1-NEXT: shrq %r10
-; AVX1-NEXT: vmovq %r10, %xmm7
-; AVX1-NEXT: shrq %r9
-; AVX1-NEXT: vmovq %r9, %xmm8
-; AVX1-NEXT: shrq %r8
-; AVX1-NEXT: vmovq %r8, %xmm9
-; AVX1-NEXT: shrq %rsi
-; AVX1-NEXT: vmovq %rsi, %xmm10
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shrq %rax
-; AVX1-NEXT: vmovq %rax, %xmm11
-; AVX1-NEXT: vmovq %rbx, %xmm12
-; AVX1-NEXT: vmovq %rdi, %xmm13
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shrq %rax
-; AVX1-NEXT: vmovq %rax, %xmm14
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shrq %rax
-; AVX1-NEXT: vmovq %rax, %xmm15
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX1-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5],xmm2[6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX1-NEXT: vpslld $16, %xmm3, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vpextrw $6, %xmm3, %ecx
+; AVX1-NEXT: vpextrw $5, %xmm3, %eax
+; AVX1-NEXT: decl %edx
+; AVX1-NEXT: vmovd %edx, %xmm4
+; AVX1-NEXT: vpextrw $4, %xmm3, %edx
+; AVX1-NEXT: decl %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm5
+; AVX1-NEXT: vpextrw $1, %xmm3, %ecx
+; AVX1-NEXT: decl %eax
+; AVX1-NEXT: vmovd %eax, %xmm6
+; AVX1-NEXT: vpextrw $0, %xmm3, %eax
+; AVX1-NEXT: decl %edx
+; AVX1-NEXT: vmovd %edx, %xmm7
+; AVX1-NEXT: vpextrw $3, %xmm3, %edx
+; AVX1-NEXT: decq %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm8
+; AVX1-NEXT: vpextrw $2, %xmm3, %ecx
+; AVX1-NEXT: decq %rax
+; AVX1-NEXT: vmovq %rax, %xmm3
+; AVX1-NEXT: vpextrw $7, %xmm2, %eax
+; AVX1-NEXT: decl %edx
+; AVX1-NEXT: vmovd %edx, %xmm9
+; AVX1-NEXT: vpextrw $6, %xmm2, %edx
+; AVX1-NEXT: decl %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm10
+; AVX1-NEXT: vpextrw $5, %xmm2, %ecx
+; AVX1-NEXT: decl %eax
+; AVX1-NEXT: vmovd %eax, %xmm11
+; AVX1-NEXT: vpextrw $4, %xmm2, %eax
+; AVX1-NEXT: decl %edx
+; AVX1-NEXT: vmovd %edx, %xmm12
+; AVX1-NEXT: vpextrw $1, %xmm2, %edx
+; AVX1-NEXT: decl %ecx
+; AVX1-NEXT: vmovd %ecx, %xmm13
+; AVX1-NEXT: vpextrw $0, %xmm2, %ecx
+; AVX1-NEXT: decl %eax
+; AVX1-NEXT: vmovd %eax, %xmm14
+; AVX1-NEXT: vpextrw $3, %xmm2, %eax
+; AVX1-NEXT: decq %rdx
+; AVX1-NEXT: vmovq %rdx, %xmm15
+; AVX1-NEXT: vpextrw $2, %xmm2, %edx
+; AVX1-NEXT: decq %rcx
+; AVX1-NEXT: vmovq %rcx, %xmm2
+; AVX1-NEXT: decl %eax
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX1-NEXT: vmovd %eax, %xmm5
+; AVX1-NEXT: decl %edx
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX1-NEXT: vmovd %edx, %xmm7
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,0,0]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,5],xmm4[6,7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vandps %ymm0, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: not_avg_v16i8_wide_constants:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vmovq %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm7
-; AVX2-NEXT: vmovq %xmm7, %rsi
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vmovq %xmm2, %rdx
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm8
-; AVX2-NEXT: vmovq %xmm8, %r8
-; AVX2-NEXT: vpextrq $1, %xmm8, %r13
-; AVX2-NEXT: vpextrq $1, %xmm2, %r14
-; AVX2-NEXT: vpextrq $1, %xmm7, %r15
-; AVX2-NEXT: vpextrq $1, %xmm6, %r12
-; AVX2-NEXT: vpextrq $1, %xmm4, %rbx
-; AVX2-NEXT: vpextrq $1, %xmm1, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX2-NEXT: vmovq %xmm3, %rax
-; AVX2-NEXT: vpextrq $1, %xmm0, %r11
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm2
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm5
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm8 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7
-; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm8
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm8 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm8, %xmm9
-; AVX2-NEXT: vpextrq $1, %xmm9, %r9
-; AVX2-NEXT: addq %r13, %r9
-; AVX2-NEXT: movq %r9, %r13
-; AVX2-NEXT: vpextrq $1, %xmm8, %r9
-; AVX2-NEXT: addq %r14, %r9
-; AVX2-NEXT: movq %r9, %r14
-; AVX2-NEXT: vpextrq $1, %xmm7, %r10
-; AVX2-NEXT: addq %r15, %r10
-; AVX2-NEXT: vpextrq $1, %xmm5, %r15
-; AVX2-NEXT: addq %r12, %r15
-; AVX2-NEXT: vpextrq $1, %xmm4, %r12
-; AVX2-NEXT: addq %rbx, %r12
-; AVX2-NEXT: vpextrq $1, %xmm3, %rbp
-; AVX2-NEXT: addq %rdi, %rbp
-; AVX2-NEXT: vpextrq $1, %xmm6, %r9
-; AVX2-NEXT: addq %rcx, %r9
-; AVX2-NEXT: vmovq %xmm6, %rdi
-; AVX2-NEXT: addq %rax, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX2-NEXT: addq %r11, %rcx
-; AVX2-NEXT: vmovq %xmm9, %r11
-; AVX2-NEXT: leaq -1(%r8,%r11), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm8, %r8
-; AVX2-NEXT: leaq -1(%rdx,%r8), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm7, %rdx
-; AVX2-NEXT: leaq -1(%rsi,%rdx), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm5, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: leaq -1(%rax,%rdx), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm4, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: leaq -1(%rax,%rdx), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm1, %rdx
-; AVX2-NEXT: vmovq %xmm3, %rsi
-; AVX2-NEXT: leaq -1(%rdx,%rsi), %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: vmovq %xmm2, %rsi
-; AVX2-NEXT: leaq -1(%rdx,%rsi), %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: addq $-1, %r13
-; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movl $0, %edx
-; AVX2-NEXT: adcq $-1, %rdx
-; AVX2-NEXT: addq $-1, %r14
-; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movl $0, %esi
-; AVX2-NEXT: adcq $-1, %rsi
-; AVX2-NEXT: addq $-1, %r10
-; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movl $0, %r8d
-; AVX2-NEXT: adcq $-1, %r8
-; AVX2-NEXT: addq $-1, %r15
-; AVX2-NEXT: movl $0, %r10d
-; AVX2-NEXT: adcq $-1, %r10
-; AVX2-NEXT: addq $-1, %r12
-; AVX2-NEXT: movl $0, %ebx
-; AVX2-NEXT: adcq $-1, %rbx
-; AVX2-NEXT: addq $-1, %rbp
-; AVX2-NEXT: movl $0, %r14d
-; AVX2-NEXT: adcq $-1, %r14
-; AVX2-NEXT: addq $-1, %r9
-; AVX2-NEXT: movl $0, %r13d
-; AVX2-NEXT: adcq $-1, %r13
-; AVX2-NEXT: addq $-1, %rdi
-; AVX2-NEXT: movl $0, %r11d
-; AVX2-NEXT: adcq $-1, %r11
-; AVX2-NEXT: addq $-1, %rcx
-; AVX2-NEXT: movl $0, %eax
-; AVX2-NEXT: adcq $-1, %rax
-; AVX2-NEXT: shldq $63, %rcx, %rax
-; AVX2-NEXT: shldq $63, %rdi, %r11
-; AVX2-NEXT: shldq $63, %r9, %r13
-; AVX2-NEXT: shldq $63, %rbp, %r14
-; AVX2-NEXT: shldq $63, %r12, %rbx
-; AVX2-NEXT: shldq $63, %r15, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %r8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %rsi
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %rdx
-; AVX2-NEXT: vmovq %rdx, %xmm0
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vmovq %rsi, %xmm2
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm3
-; AVX2-NEXT: vmovq %r8, %xmm4
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm5
-; AVX2-NEXT: vmovq %r10, %xmm6
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm7
-; AVX2-NEXT: vmovq %rbx, %xmm8
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm9
-; AVX2-NEXT: vmovq %r14, %xmm10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shrq %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm11
-; AVX2-NEXT: vmovq %r13, %xmm12
-; AVX2-NEXT: vmovq %r11, %xmm13
-; AVX2-NEXT: vmovq %rax, %xmm14
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: shrq %rax
-; AVX2-NEXT: vmovq %rax, %xmm15
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5],xmm2[6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX2-NEXT: vpsllq $48, %xmm1, %xmm1
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX2-NEXT: vpbroadcastw %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX2-NEXT: vpslld $16, %xmm3, %xmm3
-; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: not_avg_v16i8_wide_constants:
-; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm0
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX512-NEXT: vmovq %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: vpextrq $1, %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX512-NEXT: vmovq %xmm3, %r13
-; AVX512-NEXT: vpextrq $1, %xmm3, %rsi
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX512-NEXT: vmovq %xmm3, %rdi
-; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm5
-; AVX512-NEXT: vmovq %xmm5, %r8
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-NEXT: vmovq %xmm2, %r9
-; AVX512-NEXT: vpextrq $1, %xmm2, %r10
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %r11
-; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
-; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: vpextrq $1, %xmm1, %rax
-; AVX512-NEXT: vpextrq $1, %xmm0, %r14
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm2
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6
-; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm4
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm7 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm4
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm8
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm9
-; AVX512-NEXT: vpextrq $1, %xmm8, %rbp
-; AVX512-NEXT: addq %rdx, %rbp
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX512-NEXT: addq %rax, %rcx
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: addq %r14, %rax
-; AVX512-NEXT: vpextrq $1, %xmm9, %r14
-; AVX512-NEXT: leaq -1(%rbx,%r14), %r12
-; AVX512-NEXT: vmovq %xmm9, %rbx
-; AVX512-NEXT: leaq -1(%r11,%rbx), %r15
-; AVX512-NEXT: vpextrq $1, %xmm7, %r11
-; AVX512-NEXT: leaq -1(%r10,%r11), %r14
-; AVX512-NEXT: vmovq %xmm7, %r10
-; AVX512-NEXT: leaq -1(%r9,%r10), %rbx
-; AVX512-NEXT: vmovq %xmm8, %r9
-; AVX512-NEXT: leaq -1(%r8,%r9), %r11
-; AVX512-NEXT: vmovq %xmm4, %r8
-; AVX512-NEXT: leaq -1(%rdi,%r8), %r10
-; AVX512-NEXT: vpextrq $1, %xmm6, %rdi
-; AVX512-NEXT: leaq -1(%rsi,%rdi), %r9
-; AVX512-NEXT: vmovq %xmm6, %rsi
-; AVX512-NEXT: leaq -1(%r13,%rsi), %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm5, %rsi
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: leaq -1(%rdi,%rsi), %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm5, %rsi
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: leaq -1(%rdi,%rsi), %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm1, %rsi
-; AVX512-NEXT: vmovq %xmm3, %rdi
-; AVX512-NEXT: leaq -1(%rsi,%rdi), %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: vmovq %xmm2, %rdi
-; AVX512-NEXT: leaq -1(%rsi,%rdi), %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: xorl %r8d, %r8d
-; AVX512-NEXT: addq $-1, %rbp
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: adcq $-1, %rsi
-; AVX512-NEXT: addq $-1, %rdx
-; AVX512-NEXT: movl $0, %edi
-; AVX512-NEXT: adcq $-1, %rdi
-; AVX512-NEXT: addq $-1, %rcx
-; AVX512-NEXT: movl $0, %r13d
-; AVX512-NEXT: adcq $-1, %r13
-; AVX512-NEXT: addq $-1, %rax
-; AVX512-NEXT: adcq $-1, %r8
-; AVX512-NEXT: shldq $63, %rax, %r8
-; AVX512-NEXT: shldq $63, %rcx, %r13
-; AVX512-NEXT: shldq $63, %rdx, %rdi
-; AVX512-NEXT: shldq $63, %rbp, %rsi
-; AVX512-NEXT: shrq %r12
-; AVX512-NEXT: vmovq %r12, %xmm0
-; AVX512-NEXT: shrq %r15
-; AVX512-NEXT: vmovq %r15, %xmm1
-; AVX512-NEXT: shrq %r14
-; AVX512-NEXT: vmovq %r14, %xmm2
-; AVX512-NEXT: shrq %rbx
-; AVX512-NEXT: vmovq %rbx, %xmm3
-; AVX512-NEXT: vmovq %rsi, %xmm4
-; AVX512-NEXT: shrq %r11
-; AVX512-NEXT: vmovq %r11, %xmm5
-; AVX512-NEXT: vmovq %rdi, %xmm6
-; AVX512-NEXT: shrq %r10
-; AVX512-NEXT: vmovq %r10, %xmm7
-; AVX512-NEXT: shrq %r9
-; AVX512-NEXT: vmovq %r9, %xmm8
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shrq %rax
-; AVX512-NEXT: vmovq %rax, %xmm9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shrq %rax
-; AVX512-NEXT: vmovq %rax, %xmm10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shrq %rax
-; AVX512-NEXT: vmovq %rax, %xmm11
-; AVX512-NEXT: vmovq %r13, %xmm12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shrq %rax
-; AVX512-NEXT: vmovq %rax, %xmm13
-; AVX512-NEXT: vmovq %r8, %xmm14
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shrq %rax
-; AVX512-NEXT: vmovq %rax, %xmm15
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
-; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
-; AVX512-NEXT: vpbroadcastw %xmm2, %xmm2
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX512-NEXT: vpsllq $48, %xmm2, %xmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX512-NEXT: vpbroadcastw %xmm3, %xmm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX512-NEXT: vmovdqu %xmm0, (%rax)
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: not_avg_v16i8_wide_constants:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: not_avg_v16i8_wide_constants:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
%1 = load <16 x i8>, ptr %a
%2 = load <16 x i8>, ptr %b
%3 = zext <16 x i8> %1 to <16 x i128>
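
The crash the comment at the top of not_avg_v16i8_wide_constants refers to comes from the <16 x i128> zexts above: APInt::getZExtValue asserts once a value needs more than 64 active bits, so any combine that reads build-vector constants has to check the width first. A minimal C++ sketch of the failure mode (readConstantSafely is a hypothetical helper for illustration, not part of this patch; the APInt calls are the real API):

  #include "llvm/ADT/APInt.h"

  // Hypothetical helper: read a constant without tripping the
  // "too many bits for uint64_t" assertion inside getZExtValue().
  static uint64_t readConstantSafely(const llvm::APInt &C) {
    if (C.getActiveBits() <= 64)
      return C.getZExtValue();    // value fits in a uint64_t: safe to read
    return C.getLimitedValue();   // wider than 64 bits: clamp instead
  }
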
@@ -2406,20 +1973,20 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
define <1 x i8> @avg_v1i8(<1 x i8> %x, <1 x i8> %y) {
; SSE2-LABEL: avg_v1i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movzbl %dil, %eax
-; SSE2-NEXT: movzbl %sil, %ecx
-; SSE2-NEXT: leal 1(%rax,%rcx), %eax
-; SSE2-NEXT: shrl %eax
-; SSE2-NEXT: # kill: def $al killed $al killed $eax
+; SSE2-NEXT: movl %edi, %eax
+; SSE2-NEXT: orb %sil, %al
+; SSE2-NEXT: xorb %sil, %dil
+; SSE2-NEXT: shrb %dil
+; SSE2-NEXT: subb %dil, %al
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v1i8:
; AVX: # %bb.0:
-; AVX-NEXT: movzbl %dil, %eax
-; AVX-NEXT: movzbl %sil, %ecx
-; AVX-NEXT: leal 1(%rax,%rcx), %eax
-; AVX-NEXT: shrl %eax
-; AVX-NEXT: # kill: def $al killed $al killed $eax
+; AVX-NEXT: movl %edi, %eax
+; AVX-NEXT: orb %sil, %al
+; AVX-NEXT: xorb %sil, %dil
+; AVX-NEXT: shrb %dil
+; AVX-NEXT: subb %dil, %al
; AVX-NEXT: retq
%a = zext <1 x i8> %x to <1 x i16>
%b = zext <1 x i8> %y to <1 x i16>
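
For reference, the new scalar avg_v1i8 codegen above (or/xor/shr/sub) is the overflow-free ceiling-average identity avgceilu(x, y) = (x | y) - ((x ^ y) >> 1), which never widens past the element type. A self-contained C++ sketch that checks it against the widened reference (a model of the expansion only, not the expandAVG source):

  #include <cassert>
  #include <cstdint>

  // Overflow-free unsigned ceiling average, as in the avg_v1i8 asm above.
  static uint8_t AvgCeilU8(uint8_t X, uint8_t Y) {
    return (X | Y) - ((X ^ Y) >> 1);
  }

  int main() {
    for (unsigned X = 0; X < 256; ++X)
      for (unsigned Y = 0; Y < 256; ++Y)
        assert(AvgCeilU8(X, Y) == ((X + Y + 1) >> 1)); // widened reference
    return 0;
  }
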
diff --git a/llvm/test/CodeGen/X86/avgceils.ll b/llvm/test/CodeGen/X86/avgceils.ll
index f44f98c2a41ab..bb6f3fc9f588a 100644
--- a/llvm/test/CodeGen/X86/avgceils.ll
+++ b/llvm/test/CodeGen/X86/avgceils.ll
@@ -12,51 +12,38 @@
define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE-NEXT: pxor %xmm1, %xmm0
-; SSE-NEXT: psubb %xmm0, %xmm2
-; SSE-NEXT: paddb %xmm1, %xmm2
-; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsubb %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
-; AVX512-NEXT: vpsubb %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: retq
%or = or <16 x i8> %a0, %a1
%xor = xor <16 x i8> %a0, %a1
@@ -66,95 +53,40 @@ define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
}
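
The test_fixed_v16i8 diff above shows the signed ceiling average now lowering to PAVGB bracketed by sign-bit flips: XORing each i8 lane with 0x80 is an order-preserving map from signed to unsigned, so the unsigned average can do the work. A small C++ sketch of the trick (a model assuming two's complement and an arithmetic right shift for the reference, not the X86ISelLowering code itself):

  #include <cassert>
  #include <cstdint>

  static uint8_t AvgCeilU8(uint8_t X, uint8_t Y) {   // PAVGB semantics
    return (X + Y + 1) >> 1;                         // promoted to int, no overflow
  }

  static int8_t AvgCeilS8(int8_t X, int8_t Y) {
    // Flip the sign bits, average as unsigned, flip the result back.
    return (int8_t)(AvgCeilU8((uint8_t)X ^ 0x80, (uint8_t)Y ^ 0x80) ^ 0x80);
  }

  int main() {
    for (int X = -128; X < 128; ++X)
      for (int Y = -128; Y < 128; ++Y)
        assert(AvgCeilS8(X, Y) == ((X + Y + 1) >> 1)); // arithmetic shift
    return 0;
  }
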
define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: paddw %xmm2, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: psubw %xmm1, %xmm4
-; SSE2-NEXT: psubw %xmm1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
-; SSE4-NEXT: paddw %xmm2, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm1
-; SSE4-NEXT: paddw %xmm3, %xmm1
-; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE4-NEXT: psubw %xmm2, %xmm0
-; SSE4-NEXT: psubw %xmm2, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm2, %xmm0
-; SSE4-NEXT: pand %xmm2, %xmm1
-; SSE4-NEXT: packuswb %xmm1, %xmm0
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: pavgb %xmm1, %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpsubw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
; AVX512-NEXT: retq
%x0 = sext <16 x i8> %a0 to <16 x i16>
%x1 = sext <16 x i8> %a1 to <16 x i16>
@@ -170,16 +102,16 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_fixed_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
; AVX-NEXT: vpsubw %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -191,95 +123,23 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
}
define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE2-NEXT: psubd %xmm2, %xmm0
-; SSE2-NEXT: psubd %xmm2, %xmm1
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE4-NEXT: paddd %xmm2, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm1
-; SSE4-NEXT: paddd %xmm3, %xmm1
-; SSE4-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE4-NEXT: psubd %xmm2, %xmm0
-; SSE4-NEXT: psubd %xmm2, %xmm1
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm2, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; SSE4-NEXT: packusdw %xmm1, %xmm0
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v8i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = sext <8 x i16> %a0 to <8 x i32>
%x1 = sext <8 x i16> %a1 to <8 x i32>
%sum = add <8 x i32> %x0, %x1
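Note: the signed i16/i32 cases in this file all collapse to the overflow-free
identity avgceil(a, b) == (a | b) - ((a ^ b) >> 1), with an arithmetic shift
for the signed flavour and a logical shift for the unsigned one; that is
exactly the por/pxor/psraw/psubw sequence in the checks above. A standalone
C++ sketch verifying the i16 case against a widened reference (function name
is illustrative, not from the patch):

  #include <cassert>
  #include <cstdint>

  // a + b == 2*(a | b) - (a ^ b), so subtracting half of the xor term
  // (rounded toward -inf by the arithmetic shift) from a | b yields
  // ceil((a + b) / 2) without ever forming the overflowing i16 sum.
  static int16_t avgceils16(int16_t a, int16_t b) {
    return (int16_t)((a | b) - ((a ^ b) >> 1)); // promotion keeps this exact
  }

  int main() {
    const int16_t Tests[] = {INT16_MIN, INT16_MIN + 1, -2, -1, 0,
                             1, 2, 3, INT16_MAX - 1, INT16_MAX};
    for (int16_t A : Tests)
      for (int16_t B : Tests) {
        int32_t Ref = ((int32_t)A + B + 1) >> 1; // exact in 32 bits
        assert(avgceils16(A, B) == (int16_t)Ref);
      }
    return 0;
  }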
@@ -294,16 +154,16 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: psrad $1, %xmm1
-; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_fixed_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -315,92 +175,23 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
}
define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm2, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: psubq %xmm1, %xmm4
-; SSE2-NEXT: psubq %xmm1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
-; SSE4-NEXT: paddq %xmm2, %xmm4
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
-; SSE4-NEXT: paddq %xmm3, %xmm0
-; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE4-NEXT: psubq %xmm1, %xmm4
-; SSE4-NEXT: psubq %xmm1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovqd %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = sext <4 x i32> %a0 to <4 x i64>
%x1 = sext <4 x i32> %a1 to <4 x i64>
%sum = add <4 x i64> %x0, %x1
@@ -414,54 +205,52 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: por %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: psubq %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: psubq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v2i64:
; SSE4: # %bb.0:
; SSE4-NEXT: movdqa %xmm0, %xmm2
-; SSE4-NEXT: por %xmm1, %xmm2
-; SSE4-NEXT: pxor %xmm0, %xmm1
-; SSE4-NEXT: movdqa %xmm1, %xmm0
-; SSE4-NEXT: psrad $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
-; SSE4-NEXT: psubq %xmm1, %xmm2
-; SSE4-NEXT: movdqa %xmm2, %xmm0
+; SSE4-NEXT: pxor %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: psrad $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE4-NEXT: por %xmm1, %xmm0
+; SSE4-NEXT: psubq %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX2-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX2-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
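Note: only AVX512 has a 64-bit arithmetic shift (the vpsraq in the AVX512
checks); the pre-AVX512 v2i64 sequences above synthesize x >>s 1 from
psrlq $1 (which produces the correct low 32 bits of each lane) plus psrad $1
(which produces the correct, sign-extended high 32 bits) and a
dword-granularity blend. The same construction as a standalone SSE4.1
intrinsics sketch (illustrative, not from the patch; build with -msse4.1 on
x86-64):

  #include <cassert>
  #include <cstdint>
  #include <smmintrin.h>

  static __m128i sra_epi64_by1(__m128i X) {
    __m128i Hi = _mm_srai_epi32(X, 1); // high dword of each qword correct
    __m128i Lo = _mm_srli_epi64(X, 1); // low dword of each qword correct
    // Words 2,3 and 6,7 (the high dwords) come from the psrad result.
    return _mm_blend_epi16(Lo, Hi, 0xCC);
  }

  int main() {
    const int64_t Vals[] = {INT64_MIN, -3, -1, 0, 1, INT64_MAX};
    for (int64_t V : Vals) {
      __m128i X = _mm_set1_epi64x(V);
      int64_t Out = _mm_extract_epi64(sra_epi64_by1(X), 0);
      assert(Out == (V >> 1)); // reference: arithmetic shift right by one
    }
    return 0;
  }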
@@ -475,92 +264,56 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v2i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %rdx, %rsi
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: movq %xmm1, %rdi
-; SSE2-NEXT: movq %rdi, %r8
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r9
-; SSE2-NEXT: movq %r9, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: addq %r9, %rdx
-; SSE2-NEXT: adcq %rsi, %r10
-; SSE2-NEXT: addq %rdi, %rax
-; SSE2-NEXT: adcq %rcx, %r8
-; SSE2-NEXT: addq $1, %rax
-; SSE2-NEXT: adcq $0, %r8
-; SSE2-NEXT: addq $1, %rdx
-; SSE2-NEXT: adcq $0, %r10
-; SSE2-NEXT: shldq $63, %rdx, %r10
-; SSE2-NEXT: shldq $63, %rax, %r8
-; SSE2-NEXT: movq %r8, %xmm0
-; SSE2-NEXT: movq %r10, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: psubq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v2i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: pextrq $1, %xmm0, %rax
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %xmm0, %rdx
-; SSE4-NEXT: movq %rdx, %rsi
-; SSE4-NEXT: sarq $63, %rsi
-; SSE4-NEXT: pextrq $1, %xmm1, %rdi
-; SSE4-NEXT: movq %rdi, %r8
-; SSE4-NEXT: sarq $63, %r8
-; SSE4-NEXT: movq %xmm1, %r9
-; SSE4-NEXT: movq %r9, %r10
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: addq %r9, %rdx
-; SSE4-NEXT: adcq %rsi, %r10
-; SSE4-NEXT: addq %rdi, %rax
-; SSE4-NEXT: adcq %rcx, %r8
-; SSE4-NEXT: addq $1, %rax
-; SSE4-NEXT: adcq $0, %r8
-; SSE4-NEXT: addq $1, %rdx
-; SSE4-NEXT: adcq $0, %r10
-; SSE4-NEXT: shldq $63, %rdx, %r10
-; SSE4-NEXT: shldq $63, %rax, %r8
-; SSE4-NEXT: movq %r8, %xmm1
-; SSE4-NEXT: movq %r10, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: psrad $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE4-NEXT: por %xmm1, %xmm0
+; SSE4-NEXT: psubq %xmm2, %xmm0
; SSE4-NEXT: retq
;
-; AVX-LABEL: test_ext_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrq $1, %xmm0, %rax
-; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: sarq $63, %rcx
-; AVX-NEXT: vmovq %xmm0, %rdx
-; AVX-NEXT: movq %rdx, %rsi
-; AVX-NEXT: sarq $63, %rsi
-; AVX-NEXT: vpextrq $1, %xmm1, %rdi
-; AVX-NEXT: movq %rdi, %r8
-; AVX-NEXT: sarq $63, %r8
-; AVX-NEXT: vmovq %xmm1, %r9
-; AVX-NEXT: movq %r9, %r10
-; AVX-NEXT: sarq $63, %r10
-; AVX-NEXT: addq %r9, %rdx
-; AVX-NEXT: adcq %rsi, %r10
-; AVX-NEXT: addq %rdi, %rax
-; AVX-NEXT: adcq %rcx, %r8
-; AVX-NEXT: addq $1, %rax
-; AVX-NEXT: adcq $0, %r8
-; AVX-NEXT: addq $1, %rdx
-; AVX-NEXT: adcq $0, %r10
-; AVX-NEXT: shldq $63, %rdx, %r10
-; AVX-NEXT: shldq $63, %rax, %r8
-; AVX-NEXT: vmovq %r8, %xmm0
-; AVX-NEXT: vmovq %r10, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: test_ext_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX2-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
%x0 = sext <2 x i64> %a0 to <2 x i128>
%x1 = sext <2 x i64> %a1 to <2 x i128>
%sum = add <2 x i128> %x0, %x1
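Note: this is the biggest cleanup in the file. The old lowering actually
performed the <2 x i128> arithmetic, scalarizing to GPRs with add/adc/shld
chains; the identity form needs nothing wider than the element type. A
standalone check of the i64 case against a 128-bit reference (__int128 is a
GCC/Clang extension; the helper name is illustrative):

  #include <cassert>
  #include <cstdint>

  static int64_t avgceils64(int64_t a, int64_t b) {
    return (a | b) - ((a ^ b) >> 1); // arithmetic shift on the xor term
  }

  int main() {
    const int64_t Tests[] = {INT64_MIN, INT64_MIN + 1, -2, -1, 0,
                             1, 2, INT64_MAX - 1, INT64_MAX};
    for (int64_t A : Tests)
      for (int64_t B : Tests) {
        __int128 Ref = ((__int128)A + B + 1) >> 1; // exact, no i64 overflow
        assert(avgceils64(A, B) == (int64_t)Ref);
      }
    return 0;
  }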
@@ -577,70 +330,46 @@ define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE-NEXT: pxor %xmm3, %xmm1
-; SSE-NEXT: psubb %xmm1, %xmm4
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: psubb %xmm0, %xmm5
-; SSE-NEXT: paddb %xmm3, %xmm5
-; SSE-NEXT: paddb %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpavgb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
-; AVX512-NEXT: vpsubb %ymm0, %ymm2, %ymm0
-; AVX512-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
%or = or <32 x i8> %a0, %a1
%xor = xor <32 x i8> %a0, %a1
@@ -650,152 +379,48 @@ define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
}
define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm7
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: paddw %xmm5, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm6, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: paddw %xmm7, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: paddw %xmm8, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE2-NEXT: psubw %xmm3, %xmm4
-; SSE2-NEXT: psubw %xmm3, %xmm0
-; SSE2-NEXT: psubw %xmm3, %xmm2
-; SSE2-NEXT: psubw %xmm3, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
-; SSE4-NEXT: paddw %xmm4, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm2
-; SSE4-NEXT: paddw %xmm5, %xmm2
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
-; SSE4-NEXT: paddw %xmm6, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm3
-; SSE4-NEXT: paddw %xmm7, %xmm3
-; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE4-NEXT: psubw %xmm4, %xmm0
-; SSE4-NEXT: psubw %xmm4, %xmm2
-; SSE4-NEXT: psubw %xmm4, %xmm1
-; SSE4-NEXT: psubw %xmm4, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm4, %xmm0
-; SSE4-NEXT: pand %xmm4, %xmm2
-; SSE4-NEXT: packuswb %xmm2, %xmm0
-; SSE4-NEXT: pand %xmm4, %xmm1
-; SSE4-NEXT: pand %xmm4, %xmm3
-; SSE4-NEXT: packuswb %xmm3, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pavgb %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: pavgb %xmm3, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmovsxbw %xmm5, %xmm6
-; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubw %xmm1, %xmm4, %xmm4
-; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpavgb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpavgb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
-; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpavgb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX512-NEXT: retq
%x0 = sext <32 x i8> %a0 to <32 x i16>
%x1 = sext <32 x i8> %a1 to <32 x i16>
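Note: AVX1 has no 256-bit integer instructions, so the v32i8 lowering above
applies the sign-bit bias with a single 256-bit float-domain vxorps and then
runs vpavgb once per 128-bit half before reassembling. The same shape in
AVX1-only intrinsics (an illustrative sketch, not the patch's code; build
with -mavx):

  #include <cassert>
  #include <cstdint>
  #include <immintrin.h>

  static __m256i avgceils_v32i8(__m256i A, __m256i B) {
    const __m256 Sign = _mm256_castsi256_ps(_mm256_set1_epi8((char)0x80));
    // One 256-bit XOR per input, done in the float domain (vxorps).
    __m256i Ax =
        _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(A), Sign));
    __m256i Bx =
        _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(B), Sign));
    // PAVGB is 128-bit only on AVX1: average the two halves separately.
    __m128i Lo = _mm_avg_epu8(_mm256_castsi256_si128(Ax),
                              _mm256_castsi256_si128(Bx));
    __m128i Hi = _mm_avg_epu8(_mm256_extractf128_si256(Ax, 1),
                              _mm256_extractf128_si256(Bx, 1));
    __m256i R = _mm256_insertf128_si256(_mm256_castsi128_si256(Lo), Hi, 1);
    // Undo the bias to return to the signed domain.
    return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(R), Sign));
  }

  int main() {
    __m256i R = avgceils_v32i8(_mm256_set1_epi8(-3), _mm256_set1_epi8(2));
    // ceil((-3 + 2) / 2) == 0 in every byte lane.
    assert((int8_t)_mm_extract_epi8(_mm256_castsi256_si128(R), 0) == 0);
    return 0;
  }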
@@ -809,37 +434,37 @@ define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: psraw $1, %xmm3
-; SSE-NEXT: psubw %xmm3, %xmm4
-; SSE-NEXT: psraw $1, %xmm2
-; SSE-NEXT: psubw %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
; AVX2-NEXT: vpsubw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -847,7 +472,7 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512-LABEL: test_fixed_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
; AVX512-NEXT: vpsubw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -859,151 +484,49 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
}
define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: paddd %xmm5, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm6, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: psubd %xmm4, %xmm1
-; SSE2-NEXT: psubd %xmm4, %xmm3
-; SSE2-NEXT: psubd %xmm4, %xmm0
-; SSE2-NEXT: psubd %xmm4, %xmm2
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm2, %xmm0
-; SSE2-NEXT: pslld $15, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm3, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
-; SSE4-NEXT: paddd %xmm4, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm2
-; SSE4-NEXT: paddd %xmm5, %xmm2
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
-; SSE4-NEXT: paddd %xmm6, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm3
-; SSE4-NEXT: paddd %xmm7, %xmm3
-; SSE4-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE4-NEXT: psubd %xmm4, %xmm0
-; SSE4-NEXT: psubd %xmm4, %xmm2
-; SSE4-NEXT: psubd %xmm4, %xmm1
-; SSE4-NEXT: psubd %xmm4, %xmm3
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm4, %xmm4
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm2, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm3, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmovsxwd %xmm5, %xmm6
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm4, %xmm4
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; AVX1-NEXT: vpackusdw %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <16 x i16> %a0 to <16 x i32>
%x1 = sext <16 x i16> %a1 to <16 x i32>
@@ -1017,37 +540,37 @@ define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: psrad $1, %xmm3
-; SSE-NEXT: psubd %xmm3, %xmm4
-; SSE-NEXT: psrad $1, %xmm2
-; SSE-NEXT: psubd %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -1055,7 +578,7 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-LABEL: test_fixed_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -1067,147 +590,49 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
}
define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,3,2,3]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm6, %xmm4
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
-; SSE2-NEXT: paddq %xmm2, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
-; SSE2-NEXT: paddq %xmm7, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm3, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE2-NEXT: psubq %xmm3, %xmm4
-; SSE2-NEXT: psubq %xmm3, %xmm0
-; SSE2-NEXT: psubq %xmm3, %xmm2
-; SSE2-NEXT: psubq %xmm3, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm4, %xmm5
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm4
-; SSE4-NEXT: paddq %xmm5, %xmm4
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
-; SSE4-NEXT: paddq %xmm6, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm2
-; SSE4-NEXT: paddq %xmm7, %xmm2
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
-; SSE4-NEXT: paddq %xmm8, %xmm1
-; SSE4-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE4-NEXT: psubq %xmm3, %xmm4
-; SSE4-NEXT: psubq %xmm3, %xmm0
-; SSE4-NEXT: psubq %xmm3, %xmm2
-; SSE4-NEXT: psubq %xmm3, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
-; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,2],ymm0[0,2],ymm2[4,6],ymm0[4,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm3
-; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <8 x i32> %a0 to <8 x i64>
%x1 = sext <8 x i32> %a1 to <8 x i64>
@@ -1221,81 +646,77 @@ define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: por %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: por %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: psubq %xmm1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm4
; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: psubq %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v4i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm1, %xmm4
-; SSE4-NEXT: por %xmm3, %xmm4
-; SSE4-NEXT: movdqa %xmm0, %xmm5
-; SSE4-NEXT: por %xmm2, %xmm5
-; SSE4-NEXT: pxor %xmm0, %xmm2
-; SSE4-NEXT: pxor %xmm1, %xmm3
-; SSE4-NEXT: movdqa %xmm3, %xmm0
-; SSE4-NEXT: psrad $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
-; SSE4-NEXT: psubq %xmm3, %xmm4
-; SSE4-NEXT: movdqa %xmm2, %xmm0
-; SSE4-NEXT: psrad $1, %xmm0
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm2, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: por %xmm2, %xmm0
+; SSE4-NEXT: psubq %xmm4, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm3, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE4-NEXT: psubq %xmm2, %xmm5
-; SSE4-NEXT: movdqa %xmm5, %xmm0
-; SSE4-NEXT: movdqa %xmm4, %xmm1
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: por %xmm3, %xmm1
+; SSE4-NEXT: psubq %xmm2, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm3
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -1309,345 +730,79 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: movq %xmm0, %r11
-; SSE2-NEXT: movq %r11, %r12
-; SSE2-NEXT: sarq $63, %r12
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: movq %rcx, %rbx
-; SSE2-NEXT: sarq $63, %rbx
-; SSE2-NEXT: movq %xmm1, %rdx
-; SSE2-NEXT: movq %rdx, %r14
-; SSE2-NEXT: sarq $63, %r14
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r9
-; SSE2-NEXT: movq %r9, %r15
-; SSE2-NEXT: sarq $63, %r15
-; SSE2-NEXT: movq %xmm2, %rsi
-; SSE2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r13
-; SSE2-NEXT: movq %r13, %r8
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: movq %xmm3, %rbp
-; SSE2-NEXT: movq %rbp, %rdi
-; SSE2-NEXT: sarq $63, %rdi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: addq %rax, %r9
-; SSE2-NEXT: adcq %r15, %r10
-; SSE2-NEXT: addq %rbp, %rdx
-; SSE2-NEXT: adcq %r14, %rdi
-; SSE2-NEXT: addq %r13, %rcx
-; SSE2-NEXT: adcq %rbx, %r8
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE2-NEXT: adcq %r12, %rsi
-; SSE2-NEXT: addq $1, %r11
-; SSE2-NEXT: adcq $0, %rsi
-; SSE2-NEXT: addq $1, %rcx
-; SSE2-NEXT: adcq $0, %r8
-; SSE2-NEXT: addq $1, %rdx
-; SSE2-NEXT: adcq $0, %rdi
-; SSE2-NEXT: addq $1, %r9
-; SSE2-NEXT: adcq $0, %r10
-; SSE2-NEXT: shldq $63, %r9, %r10
-; SSE2-NEXT: shldq $63, %rdx, %rdi
-; SSE2-NEXT: shldq $63, %rcx, %r8
-; SSE2-NEXT: shldq $63, %r11, %rsi
-; SSE2-NEXT: movq %rsi, %xmm0
-; SSE2-NEXT: movq %r8, %xmm2
-; SSE2-NEXT: movq %rdi, %xmm1
-; SSE2-NEXT: movq %r10, %xmm3
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psubq %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: psubq %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v4i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: pextrq $1, %xmm0, %r11
-; SSE4-NEXT: movq %r11, %r12
-; SSE4-NEXT: sarq $63, %r12
-; SSE4-NEXT: movq %xmm0, %rcx
-; SSE4-NEXT: movq %rcx, %rbx
-; SSE4-NEXT: sarq $63, %rbx
-; SSE4-NEXT: pextrq $1, %xmm1, %rdx
-; SSE4-NEXT: movq %rdx, %r14
-; SSE4-NEXT: sarq $63, %r14
-; SSE4-NEXT: movq %xmm1, %r9
-; SSE4-NEXT: movq %r9, %r15
-; SSE4-NEXT: sarq $63, %r15
-; SSE4-NEXT: pextrq $1, %xmm2, %rsi
-; SSE4-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rsi
-; SSE4-NEXT: movq %xmm2, %r13
-; SSE4-NEXT: movq %r13, %r8
-; SSE4-NEXT: sarq $63, %r8
-; SSE4-NEXT: pextrq $1, %xmm3, %rbp
-; SSE4-NEXT: movq %rbp, %rdi
-; SSE4-NEXT: sarq $63, %rdi
-; SSE4-NEXT: movq %xmm3, %rax
-; SSE4-NEXT: movq %rax, %r10
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: addq %rax, %r9
-; SSE4-NEXT: adcq %r15, %r10
-; SSE4-NEXT: addq %rbp, %rdx
-; SSE4-NEXT: adcq %r14, %rdi
-; SSE4-NEXT: addq %r13, %rcx
-; SSE4-NEXT: adcq %rbx, %r8
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE4-NEXT: adcq %r12, %rsi
-; SSE4-NEXT: addq $1, %r11
-; SSE4-NEXT: adcq $0, %rsi
-; SSE4-NEXT: addq $1, %rcx
-; SSE4-NEXT: adcq $0, %r8
-; SSE4-NEXT: addq $1, %rdx
-; SSE4-NEXT: adcq $0, %rdi
-; SSE4-NEXT: addq $1, %r9
-; SSE4-NEXT: adcq $0, %r10
-; SSE4-NEXT: shldq $63, %r9, %r10
-; SSE4-NEXT: shldq $63, %rdx, %rdi
-; SSE4-NEXT: shldq $63, %rcx, %r8
-; SSE4-NEXT: shldq $63, %r11, %rsi
-; SSE4-NEXT: movq %rsi, %xmm2
-; SSE4-NEXT: movq %r8, %xmm0
-; SSE4-NEXT: movq %rdi, %xmm3
-; SSE4-NEXT: movq %r10, %xmm1
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm2, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: por %xmm2, %xmm0
+; SSE4-NEXT: psubq %xmm4, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm3, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: por %xmm3, %xmm1
+; SSE4-NEXT: psubq %xmm2, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_ext_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpextrq $1, %xmm2, %r11
-; AVX1-NEXT: movq %r11, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vmovq %xmm2, %rcx
-; AVX1-NEXT: movq %rcx, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vmovq %xmm0, %r8
-; AVX1-NEXT: movq %r8, %r15
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vmovq %xmm0, %r13
-; AVX1-NEXT: movq %r13, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpextrq $1, %xmm1, %rbp
-; AVX1-NEXT: movq %rbp, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vmovq %xmm1, %rax
-; AVX1-NEXT: movq %rax, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: addq %rax, %r8
-; AVX1-NEXT: adcq %r15, %r10
-; AVX1-NEXT: addq %rbp, %rdx
-; AVX1-NEXT: adcq %r14, %r9
-; AVX1-NEXT: addq %r13, %rcx
-; AVX1-NEXT: adcq %rbx, %rdi
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX1-NEXT: adcq %r12, %rsi
-; AVX1-NEXT: addq $1, %r11
-; AVX1-NEXT: adcq $0, %rsi
-; AVX1-NEXT: addq $1, %rcx
-; AVX1-NEXT: adcq $0, %rdi
-; AVX1-NEXT: addq $1, %rdx
-; AVX1-NEXT: adcq $0, %r9
-; AVX1-NEXT: addq $1, %r8
-; AVX1-NEXT: adcq $0, %r10
-; AVX1-NEXT: shldq $63, %r8, %r10
-; AVX1-NEXT: shldq $63, %rdx, %r9
-; AVX1-NEXT: shldq $63, %rcx, %rdi
-; AVX1-NEXT: shldq $63, %r11, %rsi
-; AVX1-NEXT: vmovq %rsi, %xmm0
-; AVX1-NEXT: vmovq %rdi, %xmm1
-; AVX1-NEXT: vmovq %r9, %xmm2
-; AVX1-NEXT: vmovq %r10, %xmm3
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpextrq $1, %xmm2, %r11
-; AVX2-NEXT: movq %r11, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vmovq %xmm2, %rcx
-; AVX2-NEXT: movq %rcx, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vmovq %xmm0, %r8
-; AVX2-NEXT: movq %r8, %r15
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX2-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vmovq %xmm0, %r13
-; AVX2-NEXT: movq %r13, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm1, %rbp
-; AVX2-NEXT: movq %rbp, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vmovq %xmm1, %rax
-; AVX2-NEXT: movq %rax, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: addq %rax, %r8
-; AVX2-NEXT: adcq %r15, %r10
-; AVX2-NEXT: addq %rbp, %rdx
-; AVX2-NEXT: adcq %r14, %r9
-; AVX2-NEXT: addq %r13, %rcx
-; AVX2-NEXT: adcq %rbx, %rdi
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX2-NEXT: adcq %r12, %rsi
-; AVX2-NEXT: addq $1, %r11
-; AVX2-NEXT: adcq $0, %rsi
-; AVX2-NEXT: addq $1, %rcx
-; AVX2-NEXT: adcq $0, %rdi
-; AVX2-NEXT: addq $1, %rdx
-; AVX2-NEXT: adcq $0, %r9
-; AVX2-NEXT: addq $1, %r8
-; AVX2-NEXT: adcq $0, %r10
-; AVX2-NEXT: shldq $63, %r8, %r10
-; AVX2-NEXT: shldq $63, %rdx, %r9
-; AVX2-NEXT: shldq $63, %rcx, %rdi
-; AVX2-NEXT: shldq $63, %r11, %rsi
-; AVX2-NEXT: vmovq %rsi, %xmm0
-; AVX2-NEXT: vmovq %rdi, %xmm1
-; AVX2-NEXT: vmovq %r9, %xmm2
-; AVX2-NEXT: vmovq %r10, %xmm3
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm3
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %r11
-; AVX512-NEXT: movq %r11, %r12
-; AVX512-NEXT: sarq $63, %r12
-; AVX512-NEXT: vmovq %xmm2, %rcx
-; AVX512-NEXT: movq %rcx, %rbx
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: sarq $63, %rbx
-; AVX512-NEXT: movq %rdx, %r14
-; AVX512-NEXT: sarq $63, %r14
-; AVX512-NEXT: vmovq %xmm0, %rdi
-; AVX512-NEXT: movq %rdi, %r15
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r15
-; AVX512-NEXT: sarq $63, %rsi
-; AVX512-NEXT: vmovq %xmm0, %r13
-; AVX512-NEXT: movq %r13, %r8
-; AVX512-NEXT: sarq $63, %r8
-; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
-; AVX512-NEXT: movq %rbp, %r9
-; AVX512-NEXT: sarq $63, %r9
-; AVX512-NEXT: vmovq %xmm1, %rax
-; AVX512-NEXT: movq %rax, %r10
-; AVX512-NEXT: sarq $63, %r10
-; AVX512-NEXT: addq %rax, %rdi
-; AVX512-NEXT: adcq %r15, %r10
-; AVX512-NEXT: addq %rbp, %rdx
-; AVX512-NEXT: adcq %r14, %r9
-; AVX512-NEXT: addq %r13, %rcx
-; AVX512-NEXT: adcq %rbx, %r8
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX512-NEXT: adcq %r12, %rsi
-; AVX512-NEXT: addq $1, %r11
-; AVX512-NEXT: adcq $0, %rsi
-; AVX512-NEXT: addq $1, %rcx
-; AVX512-NEXT: adcq $0, %r8
-; AVX512-NEXT: addq $1, %rdx
-; AVX512-NEXT: adcq $0, %r9
-; AVX512-NEXT: addq $1, %rdi
-; AVX512-NEXT: adcq $0, %r10
-; AVX512-NEXT: shldq $63, %rdi, %r10
-; AVX512-NEXT: shldq $63, %rdx, %r9
-; AVX512-NEXT: shldq $63, %rcx, %r8
-; AVX512-NEXT: shldq $63, %r11, %rsi
-; AVX512-NEXT: vmovq %rsi, %xmm0
-; AVX512-NEXT: vmovq %r8, %xmm1
-; AVX512-NEXT: vmovq %r9, %xmm2
-; AVX512-NEXT: vmovq %r10, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <4 x i64> %a0 to <4 x i128>
%x1 = sext <4 x i64> %a1 to <4 x i128>
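The test_ext_* variants spell the same operation through <4 x i128> arithmetic, and with the pattern now matched pre-legalization the scalarized add/adc/shld chains above collapse into the identical vector expansion. One wrinkle: AVX2 has no 64-bit arithmetic right shift (vpsraq is AVX-512 only), so the lowering synthesizes it from a per-dword vpsrad, a vpsrlq and a blend. A scalar sketch of that emulation, with my own naming:

  #include <cstdint>

  // Emulate a 64-bit arithmetic shift right by one using a 32-bit
  // arithmetic shift, mirroring the vpsrad/vpsrlq/vpblendd trio above.
  uint64_t sra64_by1(uint64_t x) {
    uint32_t hi = (uint32_t)(x >> 32);
    uint32_t lo = (uint32_t)x;
    uint32_t new_hi = (uint32_t)((int32_t)hi >> 1); // arithmetic: keeps the sign
    uint32_t new_lo = (lo >> 1) | (hi << 31);       // logical: bit 32 shifts in
    return ((uint64_t)new_hi << 32) | new_lo;
  }

In the vector form, vpsrlq $1 already produces the correct low dword of every lane (the qword shift carries bit 32 down), vpsrad $1 produces the correct high dword, and vpblendd stitches the two together.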
@@ -1665,107 +820,66 @@ define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v64i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm11
-; SSE-NEXT: movdqa %xmm2, %xmm8
-; SSE-NEXT: movdqa %xmm1, %xmm9
-; SSE-NEXT: movdqa %xmm0, %xmm10
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: por %xmm6, %xmm2
-; SSE-NEXT: por %xmm5, %xmm1
-; SSE-NEXT: por %xmm4, %xmm0
-; SSE-NEXT: pxor %xmm4, %xmm10
-; SSE-NEXT: pxor %xmm5, %xmm9
-; SSE-NEXT: pxor %xmm6, %xmm8
-; SSE-NEXT: pxor %xmm7, %xmm11
-; SSE-NEXT: psrlw $1, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE-NEXT: pand %xmm5, %xmm11
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE-NEXT: pxor %xmm4, %xmm11
-; SSE-NEXT: psubb %xmm11, %xmm3
-; SSE-NEXT: psrlw $1, %xmm8
-; SSE-NEXT: pand %xmm5, %xmm8
-; SSE-NEXT: pxor %xmm4, %xmm8
-; SSE-NEXT: psubb %xmm8, %xmm2
-; SSE-NEXT: psrlw $1, %xmm9
-; SSE-NEXT: pand %xmm5, %xmm9
-; SSE-NEXT: pxor %xmm4, %xmm9
-; SSE-NEXT: psubb %xmm9, %xmm1
-; SSE-NEXT: psrlw $1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: pxor %xmm4, %xmm10
-; SSE-NEXT: psubb %xmm10, %xmm0
-; SSE-NEXT: paddb %xmm4, %xmm0
-; SSE-NEXT: paddb %xmm4, %xmm1
-; SSE-NEXT: paddb %xmm4, %xmm2
-; SSE-NEXT: paddb %xmm4, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm8, %xmm4
+; SSE-NEXT: pxor %xmm8, %xmm0
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm8, %xmm0
+; SSE-NEXT: pxor %xmm8, %xmm5
+; SSE-NEXT: pxor %xmm8, %xmm1
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm8, %xmm1
+; SSE-NEXT: pxor %xmm8, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm2
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm8, %xmm2
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: pxor %xmm8, %xmm3
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: pxor %xmm8, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
-; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
-; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT: vpsubb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm7, %xmm5, %xmm3
-; AVX1-NEXT: vpaddb %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpaddb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vxorps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpavgb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm3, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpavgb %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb %ymm0, %ymm5, %ymm0
-; AVX2-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpavgb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
-; AVX512-NEXT: vpsubb %zmm0, %zmm2, %zmm0
-; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxorq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: retq
%or = or <64 x i8> %a0, %a1
%xor = xor <64 x i8> %a0, %a1
@@ -1775,275 +889,68 @@ define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
}
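Bytes have no vector shift at all on x86, so rather than the or/xor/shift expansion the combine converts the signed node to an unsigned one: flip the sign bits, use pavgb (an unsigned ceiling average), and flip them back -- the pxor/pavgb/pxor sandwich in the new output. A scalar sketch of the trick; avgceilu8/avgceils8 are hypothetical names of my own:

  #include <cstdint>

  // pavgb per-element semantics: unsigned ceiling average.
  uint8_t avgceilu8(uint8_t a, uint8_t b) {
    return (uint8_t)((a + b + 1) >> 1); // operands promote to int: no overflow
  }

  // Signed ceiling average via the unsigned one. XOR with 0x80 adds 128
  // (mod 256), mapping [-128,127] onto [0,255]; since both inputs get the
  // same bias, the average is biased by exactly 128, and the final XOR
  // removes it again.
  int8_t avgceils8(int8_t a, int8_t b) {
    uint8_t ua = (uint8_t)a ^ 0x80;
    uint8_t ub = (uint8_t)b ^ 0x80;
    return (int8_t)(avgceilu8(ua, ub) ^ 0x80);
  }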
define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm8
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm0[8],xmm14[9],xmm0[9],xmm14[10],xmm0[10],xmm14[11],xmm0[11],xmm14[12],xmm0[12],xmm14[13],xmm0[13],xmm14[14],xmm0[14],xmm14[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm14
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm15
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3],xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm13
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
-; SSE2-NEXT: psraw $8, %xmm12
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; SSE2-NEXT: psraw $8, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; SSE2-NEXT: psraw $8, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; SSE2-NEXT: psraw $8, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
-; SSE2-NEXT: psraw $8, %xmm8
-; SSE2-NEXT: paddw %xmm14, %xmm8
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm15, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: paddw %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: paddw %xmm13, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: paddw %xmm12, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: paddw %xmm11, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; SSE2-NEXT: psraw $8, %xmm6
-; SSE2-NEXT: paddw %xmm10, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: paddw %xmm9, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
-; SSE2-NEXT: psubw %xmm7, %xmm8
-; SSE2-NEXT: psubw %xmm7, %xmm0
-; SSE2-NEXT: psubw %xmm7, %xmm4
-; SSE2-NEXT: psubw %xmm7, %xmm1
-; SSE2-NEXT: psubw %xmm7, %xmm5
-; SSE2-NEXT: psubw %xmm7, %xmm2
-; SSE2-NEXT: psubw %xmm7, %xmm6
-; SSE2-NEXT: psubw %xmm7, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm8
-; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm7, %xmm8
-; SSE2-NEXT: pand %xmm7, %xmm0
-; SSE2-NEXT: packuswb %xmm8, %xmm0
-; SSE2-NEXT: pand %xmm7, %xmm4
-; SSE2-NEXT: pand %xmm7, %xmm1
-; SSE2-NEXT: packuswb %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm7, %xmm5
-; SSE2-NEXT: pand %xmm7, %xmm2
-; SSE2-NEXT: packuswb %xmm5, %xmm2
-; SSE2-NEXT: pand %xmm7, %xmm6
-; SSE2-NEXT: pand %xmm7, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v64i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm9
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm11
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm13
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm15
-; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
-; SSE4-NEXT: paddw %xmm8, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm4
-; SSE4-NEXT: paddw %xmm9, %xmm4
-; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
-; SSE4-NEXT: paddw %xmm10, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm5
-; SSE4-NEXT: paddw %xmm11, %xmm5
-; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
-; SSE4-NEXT: paddw %xmm12, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm6
-; SSE4-NEXT: paddw %xmm13, %xmm6
-; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
-; SSE4-NEXT: paddw %xmm14, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm7, %xmm7
-; SSE4-NEXT: paddw %xmm15, %xmm7
-; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
-; SSE4-NEXT: psubw %xmm8, %xmm0
-; SSE4-NEXT: psubw %xmm8, %xmm4
-; SSE4-NEXT: psubw %xmm8, %xmm1
-; SSE4-NEXT: psubw %xmm8, %xmm5
-; SSE4-NEXT: psubw %xmm8, %xmm2
-; SSE4-NEXT: psubw %xmm8, %xmm6
-; SSE4-NEXT: psubw %xmm8, %xmm3
-; SSE4-NEXT: psubw %xmm8, %xmm7
-; SSE4-NEXT: psrlw $1, %xmm7
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm6
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm5
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm4
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm8, %xmm0
-; SSE4-NEXT: pand %xmm8, %xmm4
-; SSE4-NEXT: packuswb %xmm4, %xmm0
-; SSE4-NEXT: pand %xmm8, %xmm1
-; SSE4-NEXT: pand %xmm8, %xmm5
-; SSE4-NEXT: packuswb %xmm5, %xmm1
-; SSE4-NEXT: pand %xmm8, %xmm2
-; SSE4-NEXT: pand %xmm8, %xmm6
-; SSE4-NEXT: packuswb %xmm6, %xmm2
-; SSE4-NEXT: pand %xmm8, %xmm3
-; SSE4-NEXT: pand %xmm8, %xmm7
-; SSE4-NEXT: packuswb %xmm7, %xmm3
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; SSE-NEXT: pxor %xmm8, %xmm4
+; SSE-NEXT: pxor %xmm8, %xmm0
+; SSE-NEXT: pavgb %xmm4, %xmm0
+; SSE-NEXT: pxor %xmm8, %xmm0
+; SSE-NEXT: pxor %xmm8, %xmm5
+; SSE-NEXT: pxor %xmm8, %xmm1
+; SSE-NEXT: pavgb %xmm5, %xmm1
+; SSE-NEXT: pxor %xmm8, %xmm1
+; SSE-NEXT: pxor %xmm8, %xmm6
+; SSE-NEXT: pxor %xmm8, %xmm2
+; SSE-NEXT: pavgb %xmm6, %xmm2
+; SSE-NEXT: pxor %xmm8, %xmm2
+; SSE-NEXT: pxor %xmm8, %xmm7
+; SSE-NEXT: pxor %xmm8, %xmm3
+; SSE-NEXT: pavgb %xmm7, %xmm3
+; SSE-NEXT: pxor %xmm8, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm4, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpmovsxbw %xmm7, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm9
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
-; AVX1-NEXT: vpmovsxbw %xmm10, %xmm11
-; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
-; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
-; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm10
-; AVX1-NEXT: vpaddw %xmm10, %xmm8, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm7
-; AVX1-NEXT: vpaddw %xmm7, %xmm9, %xmm7
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm5
-; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpsubw %xmm3, %xmm6, %xmm6
-; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsubw %xmm3, %xmm8, %xmm8
-; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubw %xmm3, %xmm7, %xmm7
-; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm6
-; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm5
-; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm4
-; AVX1-NEXT: vpackuswb %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm5
-; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
-; AVX1-NEXT: vpackuswb %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm4
-; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
-; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
-; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX1-NEXT: vxorps %ymm4, %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpavgb %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vpavgb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm4, %ymm3, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpavgb %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpavgb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vxorps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpmovsxbw %xmm4, %ymm4
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX2-NEXT: vpmovsxbw %xmm5, %ymm5
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
-; AVX2-NEXT: vpmovsxbw %xmm6, %ymm6
-; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vpaddw %ymm2, %ymm5, %ymm2
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpsubw %ymm3, %ymm4, %ymm4
-; AVX2-NEXT: vpsubw %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsubw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm3
-; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX2-NEXT: vpxor %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpavgb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpavgb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbw %ymm0, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxbw %ymm1, %zmm3
-; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubw %zmm1, %zmm2, %zmm2
-; AVX512-NEXT: vpsubw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-NEXT: vpxorq %zmm2, %zmm1, %zmm1
+; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpavgb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpxorq %zmm2, %zmm0, %zmm0
; AVX512-NEXT: retq
%x0 = sext <64 x i8> %a0 to <64 x i16>
%x1 = sext <64 x i8> %a1 to <64 x i16>
@@ -2057,69 +964,70 @@ define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: por %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: pxor %xmm0, %xmm4
-; SSE-NEXT: pxor %xmm1, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm6
-; SSE-NEXT: pxor %xmm8, %xmm7
-; SSE-NEXT: psraw $1, %xmm7
-; SSE-NEXT: psubw %xmm7, %xmm3
-; SSE-NEXT: psraw $1, %xmm6
-; SSE-NEXT: psubw %xmm6, %xmm9
-; SSE-NEXT: psraw $1, %xmm5
-; SSE-NEXT: psubw %xmm5, %xmm10
-; SSE-NEXT: psraw $1, %xmm4
-; SSE-NEXT: psubw %xmm4, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: psubw %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubw %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpsubw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
; AVX512-NEXT: vpsubw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -2131,273 +1039,74 @@ define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
}
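For 16-bit and 32-bit elements the native psraw/psrad shifts exist, so the plain or/xor/asr/sub expansion is used directly, with no bias trick and no shift emulation. A quick exhaustive check -- a hypothetical harness, not part of the patch -- that the expansion agrees with the widened reference the test_ext_* functions encode:

  #include <cassert>
  #include <cstdint>

  // Compare the expansion against trunc((sext(a) + sext(b) + 1) >> 1) for
  // every i16 pair. Roughly four billion iterations: slow, but fine as a
  // one-off sanity check.
  int main() {
    for (int a = INT16_MIN; a <= INT16_MAX; ++a) {
      for (int b = INT16_MIN; b <= INT16_MAX; ++b) {
        int16_t ref = (int16_t)((a + b + 1) >> 1);
        int16_t got = (int16_t)((int16_t)(a | b) - ((int16_t)(a ^ b) >> 1));
        assert(ref == got);
      }
    }
    return 0;
  }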
define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm3[4],xmm14[5],xmm3[5],xmm14[6],xmm3[6],xmm14[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm14
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm2[4],xmm12[5],xmm2[5],xmm12[6],xmm2[6],xmm12[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm12
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: paddd %xmm13, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: paddd %xmm14, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: paddd %xmm12, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: paddd %xmm11, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: paddd %xmm10, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm9, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm8, %xmm4
-; SSE2-NEXT: pcmpeqd %xmm8, %xmm8
-; SSE2-NEXT: psubd %xmm8, %xmm3
-; SSE2-NEXT: psubd %xmm8, %xmm7
-; SSE2-NEXT: psubd %xmm8, %xmm2
-; SSE2-NEXT: psubd %xmm8, %xmm6
-; SSE2-NEXT: psubd %xmm8, %xmm1
-; SSE2-NEXT: psubd %xmm8, %xmm5
-; SSE2-NEXT: psubd %xmm8, %xmm0
-; SSE2-NEXT: psubd %xmm8, %xmm4
-; SSE2-NEXT: pslld $15, %xmm4
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm4, %xmm0
-; SSE2-NEXT: pslld $15, %xmm5
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm5, %xmm1
-; SSE2-NEXT: pslld $15, %xmm6
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm6, %xmm2
-; SSE2-NEXT: pslld $15, %xmm7
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $15, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm7, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm9
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm11
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm13
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm15
-; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
-; SSE4-NEXT: paddd %xmm8, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm4
-; SSE4-NEXT: paddd %xmm9, %xmm4
-; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
-; SSE4-NEXT: paddd %xmm10, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm5
-; SSE4-NEXT: paddd %xmm11, %xmm5
-; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
-; SSE4-NEXT: paddd %xmm12, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm6
-; SSE4-NEXT: paddd %xmm13, %xmm6
-; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
-; SSE4-NEXT: paddd %xmm14, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm7, %xmm7
-; SSE4-NEXT: paddd %xmm15, %xmm7
-; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
-; SSE4-NEXT: psubd %xmm8, %xmm0
-; SSE4-NEXT: psubd %xmm8, %xmm4
-; SSE4-NEXT: psubd %xmm8, %xmm1
-; SSE4-NEXT: psubd %xmm8, %xmm5
-; SSE4-NEXT: psubd %xmm8, %xmm2
-; SSE4-NEXT: psubd %xmm8, %xmm6
-; SSE4-NEXT: psubd %xmm8, %xmm3
-; SSE4-NEXT: psubd %xmm8, %xmm7
-; SSE4-NEXT: psrld $1, %xmm7
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: psrld $1, %xmm6
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: psrld $1, %xmm5
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm4
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm8, %xmm8
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
-; SSE4-NEXT: packusdw %xmm4, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
-; SSE4-NEXT: packusdw %xmm5, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
-; SSE4-NEXT: packusdw %xmm6, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
-; SSE4-NEXT: packusdw %xmm7, %xmm3
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: psubw %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: psubw %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: psubw %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: psubw %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm4, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpmovsxwd %xmm7, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm9
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
-; AVX1-NEXT: vpmovsxwd %xmm10, %xmm11
-; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
-; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
-; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm10
-; AVX1-NEXT: vpaddd %xmm10, %xmm8, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm9, %xmm7
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm5
-; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm4
-; AVX1-NEXT: vpsubd %xmm3, %xmm6, %xmm6
-; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm8, %xmm8
-; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm3, %xmm7, %xmm7
-; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm6, %xmm6
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
-; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
-; AVX2-NEXT: vpmovsxwd %xmm5, %ymm5
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
-; AVX2-NEXT: vpmovsxwd %xmm6, %ymm6
-; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm2, %ymm5, %ymm2
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpsubd %ymm3, %ymm4, %ymm4
-; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsubd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsubd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm4, %ymm3
-; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
-; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
-; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm3
-; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubd %zmm1, %zmm2, %zmm2
-; AVX512-NEXT: vpsubd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <32 x i16> %a0 to <32 x i32>
%x1 = sext <32 x i16> %a1 to <32 x i32>
@@ -2411,69 +1120,70 @@ define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: por %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: pxor %xmm0, %xmm4
-; SSE-NEXT: pxor %xmm1, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm6
-; SSE-NEXT: pxor %xmm8, %xmm7
-; SSE-NEXT: psrad $1, %xmm7
-; SSE-NEXT: psubd %xmm7, %xmm3
-; SSE-NEXT: psrad $1, %xmm6
-; SSE-NEXT: psubd %xmm6, %xmm9
-; SSE-NEXT: psrad $1, %xmm5
-; SSE-NEXT: psubd %xmm5, %xmm10
-; SSE-NEXT: psrad $1, %xmm4
-; SSE-NEXT: psubd %xmm4, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -2485,269 +1195,74 @@ define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
}
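The AVX1 pattern repeated throughout -- vextractf128, the operation on each 128-bit half, vinsertf128 -- is the usual legalization of 256-bit integer ops on targets without AVX2 integer support. Conceptually (hypothetical types and helpers of my own):

  #include <cstdint>

  struct V4i32 { int32_t e[4]; };  // one xmm register's worth of i32
  struct V8i32 { V4i32 lo, hi; };  // a ymm register

  // The 128-bit workhorse: the same or/xor/asr/sub expansion per element.
  static V4i32 avgceils128(V4i32 a, V4i32 b) {
    V4i32 r;
    for (int i = 0; i < 4; ++i)
      r.e[i] = (a.e[i] | b.e[i]) - ((a.e[i] ^ b.e[i]) >> 1);
    return r;
  }

  // The 256-bit op is just two independent 128-bit halves.
  V8i32 avgceils256(V8i32 a, V8i32 b) {
    return {avgceils128(a.lo, b.lo), avgceils128(a.hi, b.hi)};
  }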
define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm13, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm1[2,3,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm12, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm8[0],xmm12[1],xmm8[1]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm11, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm8[0],xmm11[1],xmm8[1]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm10, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[2,3,2,3]
-; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm14
-; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
-; SSE2-NEXT: paddq %xmm13, %xmm8
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm13
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1]
-; SSE2-NEXT: paddq %xmm12, %xmm4
-; SSE2-NEXT: pxor %xmm12, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
-; SSE2-NEXT: paddq %xmm5, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
-; SSE2-NEXT: pxor %xmm12, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
-; SSE2-NEXT: paddq %xmm11, %xmm5
-; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
-; SSE2-NEXT: paddq %xmm6, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
-; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm11
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
-; SSE2-NEXT: paddq %xmm10, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm9
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
-; SSE2-NEXT: paddq %xmm7, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm7, %xmm7
-; SSE2-NEXT: psubq %xmm7, %xmm8
-; SSE2-NEXT: psubq %xmm7, %xmm0
-; SSE2-NEXT: psubq %xmm7, %xmm4
-; SSE2-NEXT: psubq %xmm7, %xmm1
-; SSE2-NEXT: psubq %xmm7, %xmm5
-; SSE2-NEXT: psubq %xmm7, %xmm2
-; SSE2-NEXT: psubq %xmm7, %xmm6
-; SSE2-NEXT: psubq %xmm7, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm8
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm8, %xmm9
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm11
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm13
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm0
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm8
-; SSE4-NEXT: paddq %xmm9, %xmm8
-; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
-; SSE4-NEXT: paddq %xmm10, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
-; SSE4-NEXT: paddq %xmm11, %xmm4
-; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
-; SSE4-NEXT: paddq %xmm12, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm5
-; SSE4-NEXT: paddq %xmm13, %xmm5
-; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
-; SSE4-NEXT: paddq %xmm14, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm6
-; SSE4-NEXT: paddq %xmm15, %xmm6
-; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
-; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE4-NEXT: pcmpeqd %xmm7, %xmm7
-; SSE4-NEXT: psubq %xmm7, %xmm8
-; SSE4-NEXT: psubq %xmm7, %xmm0
-; SSE4-NEXT: psubq %xmm7, %xmm4
-; SSE4-NEXT: psubq %xmm7, %xmm1
-; SSE4-NEXT: psubq %xmm7, %xmm5
-; SSE4-NEXT: psubq %xmm7, %xmm2
-; SSE4-NEXT: psubq %xmm7, %xmm6
-; SSE4-NEXT: psubq %xmm7, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm6
-; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm6[0,2]
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm5
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[0,2]
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm8
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm8[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpmovsxdq %xmm5, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm8
-; AVX1-NEXT: vpmovsxdq %xmm8, %xmm9
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm8[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm10
-; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm10
-; AVX1-NEXT: vpmovsxdq %xmm10, %xmm11
-; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm7
-; AVX1-NEXT: vpmovsxdq %xmm7, %xmm10
-; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm7[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
-; AVX1-NEXT: vpcmpeqd %xmm7, %xmm7, %xmm7
-; AVX1-NEXT: vpsubq %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpsubq %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpsubq %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpsubq %xmm7, %xmm9, %xmm8
-; AVX1-NEXT: vpsubq %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm4[0,2],ymm0[0,2],ymm4[4,6],ymm0[4,6]
-; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm2
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[0,2],ymm2[4,6],ymm1[4,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm4
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxdq %xmm3, %ymm6
-; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovsxdq %xmm2, %ymm6
-; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
-; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
-; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm3
-; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
-; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <16 x i32> %a0 to <16 x i64>
%x1 = sext <16 x i32> %a1 to <16 x i64>
@@ -2761,130 +1276,130 @@ define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm8
-; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: movdqa %xmm0, %xmm11
-; SSE2-NEXT: por %xmm7, %xmm3
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm4, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm11, %xmm4
-; SSE2-NEXT: pxor %xmm10, %xmm5
-; SSE2-NEXT: pxor %xmm9, %xmm6
-; SSE2-NEXT: pxor %xmm8, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[1,3,2,3]
+; SSE2-NEXT: psubq %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,3,2,3]
; SSE2-NEXT: psrad $1, %xmm8
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; SSE2-NEXT: psubq %xmm7, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm7
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; SSE2-NEXT: psubq %xmm6, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm6
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; SSE2-NEXT: psubq %xmm5, %xmm1
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: psubq %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
; SSE2-NEXT: psrad $1, %xmm5
; SSE2-NEXT: psrlq $1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT: psubq %xmm4, %xmm0
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: psubq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: psubq %xmm4, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v8i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm8
-; SSE4-NEXT: movdqa %xmm2, %xmm9
-; SSE4-NEXT: movdqa %xmm1, %xmm10
-; SSE4-NEXT: movdqa %xmm0, %xmm11
-; SSE4-NEXT: por %xmm7, %xmm3
-; SSE4-NEXT: por %xmm6, %xmm2
-; SSE4-NEXT: por %xmm5, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm4, %xmm8
+; SSE4-NEXT: movdqa %xmm8, %xmm9
+; SSE4-NEXT: psrad $1, %xmm9
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
; SSE4-NEXT: por %xmm4, %xmm0
-; SSE4-NEXT: pxor %xmm11, %xmm4
-; SSE4-NEXT: pxor %xmm10, %xmm5
-; SSE4-NEXT: pxor %xmm9, %xmm6
-; SSE4-NEXT: pxor %xmm8, %xmm7
-; SSE4-NEXT: movdqa %xmm7, %xmm8
+; SSE4-NEXT: psubq %xmm8, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm8
; SSE4-NEXT: psrad $1, %xmm8
-; SSE4-NEXT: psrlq $1, %xmm7
-; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3],xmm7[4,5],xmm8[6,7]
-; SSE4-NEXT: psubq %xmm7, %xmm3
-; SSE4-NEXT: movdqa %xmm6, %xmm7
-; SSE4-NEXT: psrad $1, %xmm7
-; SSE4-NEXT: psrlq $1, %xmm6
-; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2,3],xmm6[4,5],xmm7[6,7]
-; SSE4-NEXT: psubq %xmm6, %xmm2
-; SSE4-NEXT: movdqa %xmm5, %xmm6
-; SSE4-NEXT: psrad $1, %xmm6
-; SSE4-NEXT: psrlq $1, %xmm5
-; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
-; SSE4-NEXT: psubq %xmm5, %xmm1
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3],xmm4[4,5],xmm8[6,7]
+; SSE4-NEXT: por %xmm5, %xmm1
+; SSE4-NEXT: psubq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: pxor %xmm6, %xmm4
; SSE4-NEXT: movdqa %xmm4, %xmm5
; SSE4-NEXT: psrad $1, %xmm5
; SSE4-NEXT: psrlq $1, %xmm4
; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
-; SSE4-NEXT: psubq %xmm4, %xmm0
+; SSE4-NEXT: por %xmm6, %xmm2
+; SSE4-NEXT: psubq %xmm4, %xmm2
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: pxor %xmm7, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: por %xmm7, %xmm3
+; SSE4-NEXT: psubq %xmm4, %xmm3
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrad $1, %xmm5, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpsrad $1, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlq $1, %ymm4, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7]
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm4
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7]
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -2898,727 +1413,132 @@ define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rax
-; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %xmm1, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %xmm3, %rbx
-; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %rdi, %rbp
-; SSE2-NEXT: sarq $63, %rbp
-; SSE2-NEXT: movq %xmm4, %r8
-; SSE2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r11
-; SSE2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r11
-; SSE2-NEXT: movq %xmm5, %r10
-; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r15
-; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r15
-; SSE2-NEXT: movq %xmm6, %r9
-; SSE2-NEXT: movq %r9, %r14
-; SSE2-NEXT: sarq $63, %r14
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rsi
-; SSE2-NEXT: movq %rsi, %r13
-; SSE2-NEXT: sarq $63, %r13
-; SSE2-NEXT: movq %xmm7, %rdx
-; SSE2-NEXT: movq %rdx, %r12
-; SSE2-NEXT: sarq $63, %r12
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: addq %rax, %rdi
-; SSE2-NEXT: adcq %rbp, %rcx
-; SSE2-NEXT: addq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE2-NEXT: adcq %rbx, %r12
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; SSE2-NEXT: addq %rsi, %rbp
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; SSE2-NEXT: addq %r9, %rbx
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE2-NEXT: adcq (%rsp), %r8 # 8-byte Folded Reload
-; SSE2-NEXT: addq $1, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: adcq $0, %r8
-; SSE2-NEXT: addq $1, %rdx
-; SSE2-NEXT: adcq $0, %r11
-; SSE2-NEXT: addq $1, %rsi
-; SSE2-NEXT: adcq $0, %r10
-; SSE2-NEXT: addq $1, %r9
-; SSE2-NEXT: adcq $0, %r15
-; SSE2-NEXT: addq $1, %rbx
-; SSE2-NEXT: adcq $0, %r14
-; SSE2-NEXT: addq $1, %rbp
-; SSE2-NEXT: adcq $0, %r13
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: addq $1, %rax
-; SSE2-NEXT: adcq $0, %r12
-; SSE2-NEXT: addq $1, %rdi
-; SSE2-NEXT: adcq $0, %rcx
-; SSE2-NEXT: shldq $63, %rdi, %rcx
-; SSE2-NEXT: shldq $63, %rax, %r12
-; SSE2-NEXT: shldq $63, %rbp, %r13
-; SSE2-NEXT: shldq $63, %rbx, %r14
-; SSE2-NEXT: shldq $63, %r9, %r15
-; SSE2-NEXT: shldq $63, %rsi, %r10
-; SSE2-NEXT: shldq $63, %rdx, %r11
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rax, %r8
-; SSE2-NEXT: movq %r8, %xmm0
-; SSE2-NEXT: movq %r11, %xmm4
-; SSE2-NEXT: movq %r10, %xmm1
-; SSE2-NEXT: movq %r15, %xmm5
-; SSE2-NEXT: movq %r14, %xmm2
-; SSE2-NEXT: movq %r13, %xmm6
-; SSE2-NEXT: movq %r12, %xmm3
-; SSE2-NEXT: movq %rcx, %xmm7
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE2-NEXT: addq $8, %rsp
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm4, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; SSE2-NEXT: por %xmm4, %xmm0
+; SSE2-NEXT: psubq %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm8
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: por %xmm5, %xmm1
+; SSE2-NEXT: psubq %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: por %xmm6, %xmm2
+; SSE2-NEXT: psubq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: por %xmm7, %xmm3
+; SSE2-NEXT: psubq %xmm4, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v8i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: subq $16, %rsp
-; SSE4-NEXT: pextrq $1, %xmm0, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm0, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm1, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm1, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm2, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm2, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm3, %r13
-; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r13
-; SSE4-NEXT: movq %xmm3, %rax
-; SSE4-NEXT: movq %rax, %rsi
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rsi
-; SSE4-NEXT: pextrq $1, %xmm4, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm4, %r11
-; SSE4-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r11
-; SSE4-NEXT: pextrq $1, %xmm5, %r10
-; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: movq %xmm5, %rax
-; SSE4-NEXT: movq %rax, %r14
-; SSE4-NEXT: sarq $63, %r14
-; SSE4-NEXT: pextrq $1, %xmm6, %rdi
-; SSE4-NEXT: movq %rdi, %rbx
-; SSE4-NEXT: sarq $63, %rbx
-; SSE4-NEXT: movq %xmm6, %rdx
-; SSE4-NEXT: movq %rdx, %r12
-; SSE4-NEXT: sarq $63, %r12
-; SSE4-NEXT: pextrq $1, %xmm7, %r15
-; SSE4-NEXT: movq %r15, %r9
-; SSE4-NEXT: sarq $63, %r9
-; SSE4-NEXT: movq %xmm7, %rbp
-; SSE4-NEXT: movq %rbp, %r8
-; SSE4-NEXT: sarq $63, %r8
-; SSE4-NEXT: addq %rbp, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: adcq %rsi, %r8
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: addq %r15, %rcx
-; SSE4-NEXT: adcq %r13, %r9
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; SSE4-NEXT: addq %rdx, %rbp
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; SSE4-NEXT: addq %rdi, %r13
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE4-NEXT: addq %rax, %r15
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE4-NEXT: adcq (%rsp), %r11 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; SSE4-NEXT: addq $1, %rdx
-; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: adcq $0, %rax
-; SSE4-NEXT: movq %rax, %rdx
-; SSE4-NEXT: addq $1, %rsi
-; SSE4-NEXT: adcq $0, %r11
-; SSE4-NEXT: addq $1, %rdi
-; SSE4-NEXT: adcq $0, %r10
-; SSE4-NEXT: addq $1, %r15
-; SSE4-NEXT: adcq $0, %r14
-; SSE4-NEXT: addq $1, %r13
-; SSE4-NEXT: adcq $0, %rbx
-; SSE4-NEXT: addq $1, %rbp
-; SSE4-NEXT: adcq $0, %r12
-; SSE4-NEXT: addq $1, %rcx
-; SSE4-NEXT: adcq $0, %r9
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE4-NEXT: addq $1, %rax
-; SSE4-NEXT: adcq $0, %r8
-; SSE4-NEXT: shldq $63, %rax, %r8
-; SSE4-NEXT: shldq $63, %rcx, %r9
-; SSE4-NEXT: shldq $63, %rbp, %r12
-; SSE4-NEXT: shldq $63, %r13, %rbx
-; SSE4-NEXT: shldq $63, %r15, %r14
-; SSE4-NEXT: shldq $63, %rdi, %r10
-; SSE4-NEXT: shldq $63, %rsi, %r11
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rcx, %rdx
-; SSE4-NEXT: movq %rdx, %xmm4
-; SSE4-NEXT: movq %r11, %xmm0
-; SSE4-NEXT: movq %r10, %xmm5
-; SSE4-NEXT: movq %r14, %xmm1
-; SSE4-NEXT: movq %rbx, %xmm6
-; SSE4-NEXT: movq %r12, %xmm2
-; SSE4-NEXT: movq %r9, %xmm7
-; SSE4-NEXT: movq %r8, %xmm3
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE4-NEXT: addq $16, %rsp
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm4, %xmm8
+; SSE4-NEXT: movdqa %xmm8, %xmm9
+; SSE4-NEXT: psrad $1, %xmm9
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
+; SSE4-NEXT: por %xmm4, %xmm0
+; SSE4-NEXT: psubq %xmm8, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm8
+; SSE4-NEXT: psrad $1, %xmm8
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3],xmm4[4,5],xmm8[6,7]
+; SSE4-NEXT: por %xmm5, %xmm1
+; SSE4-NEXT: psubq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: pxor %xmm6, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: por %xmm6, %xmm2
+; SSE4-NEXT: psubq %xmm4, %xmm2
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: pxor %xmm7, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: por %xmm7, %xmm3
+; SSE4-NEXT: psubq %xmm4, %xmm3
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: pushq %rax
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpextrq $1, %xmm4, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; AVX1-NEXT: vmovq %xmm4, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
-; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vmovq %xmm1, %r8
-; AVX1-NEXT: movq %r8, %rbp
-; AVX1-NEXT: sarq $63, %rbp
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %r9
-; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vmovq %xmm0, %r10
-; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vpextrq $1, %xmm2, %r11
-; AVX1-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vmovq %xmm2, %r15
-; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX1-NEXT: movq %rdi, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vmovq %xmm0, %rsi
-; AVX1-NEXT: movq %rsi, %r12
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpextrq $1, %xmm3, %r13
-; AVX1-NEXT: movq %r13, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vmovq %xmm3, %rax
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: addq %rax, %r8
-; AVX1-NEXT: adcq %rbp, %rcx
-; AVX1-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: adcq %rbx, %rdx
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX1-NEXT: addq %rsi, %rbp
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX1-NEXT: addq %rdi, %r13
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX1-NEXT: addq $1, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: adcq $0, %r9
-; AVX1-NEXT: addq $1, %rsi
-; AVX1-NEXT: adcq $0, %r10
-; AVX1-NEXT: addq $1, %rdi
-; AVX1-NEXT: adcq $0, %r11
-; AVX1-NEXT: addq $1, %rbx
-; AVX1-NEXT: adcq $0, %r15
-; AVX1-NEXT: addq $1, %r13
-; AVX1-NEXT: adcq $0, %r14
-; AVX1-NEXT: addq $1, %rbp
-; AVX1-NEXT: adcq $0, %r12
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: addq $1, %rax
-; AVX1-NEXT: adcq $0, %rdx
-; AVX1-NEXT: addq $1, %r8
-; AVX1-NEXT: adcq $0, %rcx
-; AVX1-NEXT: shldq $63, %r8, %rcx
-; AVX1-NEXT: shldq $63, %rax, %rdx
-; AVX1-NEXT: shldq $63, %rbp, %r12
-; AVX1-NEXT: shldq $63, %r13, %r14
-; AVX1-NEXT: shldq $63, %rbx, %r15
-; AVX1-NEXT: shldq $63, %rdi, %r11
-; AVX1-NEXT: shldq $63, %rsi, %r10
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rax, %r9
-; AVX1-NEXT: vmovq %r9, %xmm0
-; AVX1-NEXT: vmovq %r10, %xmm1
-; AVX1-NEXT: vmovq %r11, %xmm2
-; AVX1-NEXT: vmovq %r15, %xmm3
-; AVX1-NEXT: vmovq %r14, %xmm4
-; AVX1-NEXT: vmovq %r12, %xmm5
-; AVX1-NEXT: vmovq %rdx, %xmm6
-; AVX1-NEXT: vmovq %rcx, %xmm7
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: addq $8, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrad $1, %xmm5, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsubq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: pushq %rax
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; AVX2-NEXT: vpextrq $1, %xmm4, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm4, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
-; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vmovq %xmm1, %r8
-; AVX2-NEXT: movq %r8, %rbp
-; AVX2-NEXT: sarq $63, %rbp
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
-; AVX2-NEXT: vpextrq $1, %xmm0, %r9
-; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vmovq %xmm0, %r10
-; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vpextrq $1, %xmm2, %r11
-; AVX2-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vmovq %xmm2, %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX2-NEXT: movq %rdi, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vmovq %xmm0, %rsi
-; AVX2-NEXT: movq %rsi, %r12
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpextrq $1, %xmm3, %r13
-; AVX2-NEXT: movq %r13, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vmovq %xmm3, %rax
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: addq %rax, %r8
-; AVX2-NEXT: adcq %rbp, %rcx
-; AVX2-NEXT: addq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: adcq %rbx, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX2-NEXT: addq %rsi, %rbp
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX2-NEXT: addq %rdi, %r13
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX2-NEXT: addq $1, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: adcq $0, %r9
-; AVX2-NEXT: addq $1, %rsi
-; AVX2-NEXT: adcq $0, %r10
-; AVX2-NEXT: addq $1, %rdi
-; AVX2-NEXT: adcq $0, %r11
-; AVX2-NEXT: addq $1, %rbx
-; AVX2-NEXT: adcq $0, %r15
-; AVX2-NEXT: addq $1, %r13
-; AVX2-NEXT: adcq $0, %r14
-; AVX2-NEXT: addq $1, %rbp
-; AVX2-NEXT: adcq $0, %r12
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: addq $1, %rax
-; AVX2-NEXT: adcq $0, %rdx
-; AVX2-NEXT: addq $1, %r8
-; AVX2-NEXT: adcq $0, %rcx
-; AVX2-NEXT: shldq $63, %r8, %rcx
-; AVX2-NEXT: shldq $63, %rax, %rdx
-; AVX2-NEXT: shldq $63, %rbp, %r12
-; AVX2-NEXT: shldq $63, %r13, %r14
-; AVX2-NEXT: shldq $63, %rbx, %r15
-; AVX2-NEXT: shldq $63, %rdi, %r11
-; AVX2-NEXT: shldq $63, %rsi, %r10
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rax, %r9
-; AVX2-NEXT: vmovq %r9, %xmm0
-; AVX2-NEXT: vmovq %r10, %xmm1
-; AVX2-NEXT: vmovq %r11, %xmm2
-; AVX2-NEXT: vmovq %r15, %xmm3
-; AVX2-NEXT: vmovq %r14, %xmm4
-; AVX2-NEXT: vmovq %r12, %xmm5
-; AVX2-NEXT: vmovq %rdx, %xmm6
-; AVX2-NEXT: vmovq %rcx, %xmm7
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: addq $8, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpsrad $1, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlq $1, %ymm4, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7]
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm4
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7]
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: pushq %rax
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512-NEXT: vpextrq $1, %xmm3, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %rax
-; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm3, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm0, %rbx
-; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %rbx
-; AVX512-NEXT: vmovq %xmm0, %r8
-; AVX512-NEXT: movq %r8, %r13
-; AVX512-NEXT: sarq $63, %r13
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %r9
-; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r9
-; AVX512-NEXT: vmovq %xmm2, %r10
-; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r10
-; AVX512-NEXT: vpextrq $1, %xmm0, %r11
-; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r11
-; AVX512-NEXT: vmovq %xmm0, %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r14
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX512-NEXT: movq %rdi, %r15
-; AVX512-NEXT: sarq $63, %r15
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: movq %rsi, %r12
-; AVX512-NEXT: sarq $63, %r12
-; AVX512-NEXT: vpextrq $1, %xmm1, %rbp
-; AVX512-NEXT: movq %rbp, %rdx
-; AVX512-NEXT: sarq $63, %rdx
-; AVX512-NEXT: vmovq %xmm1, %rax
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: addq %rax, %r8
-; AVX512-NEXT: adcq %r13, %rcx
-; AVX512-NEXT: addq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: adcq %rbx, %rdx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
-; AVX512-NEXT: addq %rsi, %rbp
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
-; AVX512-NEXT: addq %rdi, %r13
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
-; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX512-NEXT: addq $1, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: adcq $0, %r9
-; AVX512-NEXT: addq $1, %rsi
-; AVX512-NEXT: adcq $0, %r10
-; AVX512-NEXT: addq $1, %rdi
-; AVX512-NEXT: adcq $0, %r11
-; AVX512-NEXT: addq $1, %rbx
-; AVX512-NEXT: adcq $0, %r14
-; AVX512-NEXT: addq $1, %r13
-; AVX512-NEXT: adcq $0, %r15
-; AVX512-NEXT: addq $1, %rbp
-; AVX512-NEXT: adcq $0, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: addq $1, %rax
-; AVX512-NEXT: adcq $0, %rdx
-; AVX512-NEXT: addq $1, %r8
-; AVX512-NEXT: adcq $0, %rcx
-; AVX512-NEXT: shldq $63, %r8, %rcx
-; AVX512-NEXT: shldq $63, %rax, %rdx
-; AVX512-NEXT: shldq $63, %rbp, %r12
-; AVX512-NEXT: shldq $63, %r13, %r15
-; AVX512-NEXT: shldq $63, %rbx, %r14
-; AVX512-NEXT: shldq $63, %rdi, %r11
-; AVX512-NEXT: shldq $63, %rsi, %r10
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shldq $63, %rax, %r9
-; AVX512-NEXT: vmovq %r9, %xmm0
-; AVX512-NEXT: vmovq %r10, %xmm1
-; AVX512-NEXT: vmovq %r11, %xmm2
-; AVX512-NEXT: vmovq %r14, %xmm3
-; AVX512-NEXT: vmovq %r15, %xmm4
-; AVX512-NEXT: vmovq %r12, %xmm5
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vmovq %rcx, %xmm7
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: addq $8, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <8 x i64> %a0 to <8 x i128>
%x1 = sext <8 x i64> %a1 to <8 x i128>
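
For reference, the repeated or/xor/shift/sub (and and/xor/shift/add) runs in the updated checks above are the overflow-free averaging identities that the shift-based expansion lowers to; the v8i64 signed cases additionally emulate the missing pre-AVX-512 64-bit arithmetic shift by blending psrlq of each lane with psrad of its high dword. A minimal C++ sketch of the scalar identities follows. It is an illustration only, not code from this patch, and the signed forms assume an arithmetic right shift of negative values, which C++ guarantees only from C++20 onwards.

#include <cassert>
#include <cstdint>

static int32_t avgceil_s(int32_t a, int32_t b) {
  // AVGCEILS: (a | b) - ((a ^ b) >> 1), the por/pxor/psrad/psubd shape above.
  return (a | b) - ((a ^ b) >> 1);
}

static uint32_t avgceil_u(uint32_t a, uint32_t b) {
  // AVGCEILU: same shape with a logical shift (psrld in the avgceilu checks).
  return (a | b) - ((a ^ b) >> 1);
}

static int32_t avgfloor_s(int32_t a, int32_t b) {
  // AVGFLOORS: (a & b) + ((a ^ b) >> 1).
  return (a & b) + ((a ^ b) >> 1);
}

int main() {
  assert(avgceil_s(5, 8) == 7);    // ceil(13/2)
  assert(avgceil_s(-3, 2) == 0);   // ceil(-1/2)
  assert(avgfloor_s(-3, 2) == -1); // floor(-1/2)
  assert(avgceil_u(0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFFu); // no widening needed
  return 0;
}

Because these forms never widen, the sext/zext-based test_ext_* functions now fold to the same AVG nodes as the fixed-point test_fixed_* functions, which is why both now emit identical instruction sequences in the checks above.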
diff --git a/llvm/test/CodeGen/X86/avgceilu.ll b/llvm/test/CodeGen/X86/avgceilu.ll
index d34894cc0fbb3..df2ba709a9fca 100644
--- a/llvm/test/CodeGen/X86/avgceilu.ll
+++ b/llvm/test/CodeGen/X86/avgceilu.ll
@@ -86,16 +86,16 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: psrld $1, %xmm1
-; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_fixed_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -107,85 +107,23 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
}
define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm4, %xmm2
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: paddq %xmm0, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE2-NEXT: psubq %xmm0, %xmm2
-; SSE2-NEXT: psubq %xmm0, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
-; SSE2-NEXT: movaps %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm3, %xmm3
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE4-NEXT: paddq %xmm0, %xmm1
-; SSE4-NEXT: paddq %xmm4, %xmm2
-; SSE4-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE4-NEXT: psubq %xmm0, %xmm1
-; SSE4-NEXT: psubq %xmm0, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
-; SSE4-NEXT: movaps %xmm2, %xmm0
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovqd %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = zext <4 x i32> %a0 to <4 x i64>
%x1 = zext <4 x i32> %a1 to <4 x i64>
%sum = add <4 x i64> %x0, %x1
@@ -200,16 +138,16 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm1
-; SSE-NEXT: psrlq $1, %xmm1
-; SSE-NEXT: psubq %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_fixed_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -221,81 +159,22 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
}
define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rcx
-; SSE2-NEXT: movb $1, %dl
-; SSE2-NEXT: movb $1, %sil
-; SSE2-NEXT: addb $-1, %sil
-; SSE2-NEXT: leaq 1(%rax,%rcx), %rsi
-; SSE2-NEXT: adcq %rcx, %rax
-; SSE2-NEXT: setb %al
-; SSE2-NEXT: addb $-1, %dl
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: movq %xmm1, %rdx
-; SSE2-NEXT: leaq 1(%rcx,%rdx), %rdi
-; SSE2-NEXT: adcq %rdx, %rcx
-; SSE2-NEXT: setb %cl
-; SSE2-NEXT: movzbl %cl, %ecx
-; SSE2-NEXT: movzbl %al, %eax
-; SSE2-NEXT: shrdq $1, %rcx, %rdi
-; SSE2-NEXT: shrdq $1, %rax, %rsi
-; SSE2-NEXT: movq %rdi, %xmm0
-; SSE2-NEXT: movq %rsi, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v2i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movq %xmm0, %rax
-; SSE4-NEXT: movq %xmm1, %rcx
-; SSE4-NEXT: movb $1, %dl
-; SSE4-NEXT: movb $1, %sil
-; SSE4-NEXT: addb $-1, %sil
-; SSE4-NEXT: leaq 1(%rax,%rcx), %rsi
-; SSE4-NEXT: adcq %rcx, %rax
-; SSE4-NEXT: setb %al
-; SSE4-NEXT: addb $-1, %dl
-; SSE4-NEXT: pextrq $1, %xmm0, %rcx
-; SSE4-NEXT: pextrq $1, %xmm1, %rdx
-; SSE4-NEXT: leaq 1(%rcx,%rdx), %rdi
-; SSE4-NEXT: adcq %rdx, %rcx
-; SSE4-NEXT: setb %cl
-; SSE4-NEXT: movzbl %cl, %ecx
-; SSE4-NEXT: movzbl %al, %eax
-; SSE4-NEXT: shrdq $1, %rcx, %rdi
-; SSE4-NEXT: shrdq $1, %rax, %rsi
-; SSE4-NEXT: movq %rdi, %xmm1
-; SSE4-NEXT: movq %rsi, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: por %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: test_ext_v2i64:
; AVX: # %bb.0:
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: vmovq %xmm1, %rcx
-; AVX-NEXT: movb $1, %dl
-; AVX-NEXT: movb $1, %sil
-; AVX-NEXT: addb $-1, %sil
-; AVX-NEXT: leaq 1(%rax,%rcx), %rsi
-; AVX-NEXT: adcq %rcx, %rax
-; AVX-NEXT: setb %al
-; AVX-NEXT: addb $-1, %dl
-; AVX-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX-NEXT: leaq 1(%rcx,%rdx), %rdi
-; AVX-NEXT: adcq %rdx, %rcx
-; AVX-NEXT: setb %cl
-; AVX-NEXT: movzbl %cl, %ecx
-; AVX-NEXT: movzbl %al, %eax
-; AVX-NEXT: shrdq $1, %rcx, %rdi
-; AVX-NEXT: shrdq $1, %rax, %rsi
-; AVX-NEXT: vmovq %rdi, %xmm0
-; AVX-NEXT: vmovq %rsi, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpsubq %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
%x0 = zext <2 x i64> %a0 to <2 x i128>
%x1 = zext <2 x i64> %a1 to <2 x i128>
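
This file exercises the unsigned form of the same rewrite, por/pxor plus a logical shift: avgceilu(a, b) = (a | b) - ((a ^ b) >>u 1), with (a & b) + ((a ^ b) >>u 1) as the floor-variant counterpart. As a hedged sketch, a generic DAG-level expansion of the ceiling case could look like the following; the helper name is hypothetical and the shift-amount handling is simplified (scalar code would normally go through the target's shift-amount type):

  // Sketch only: builds (A | B) - ((A ^ B) >>u 1) for ISD::AVGCEILU.
  SDValue expandAvgCeilU(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                         SDValue A, SDValue B) {
    SDValue Or  = DAG.getNode(ISD::OR,  DL, VT, A, B);
    SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, A, B);
    SDValue One = DAG.getConstant(1, DL, VT); // splat constant for vector VT
    SDValue Shr = DAG.getNode(ISD::SRL, DL, VT, Xor, One);
    return DAG.getNode(ISD::SUB, DL, VT, Or, Shr);
  }

The effect is visible in every hunk below: the test_ext_* bodies, which spell the average out through a doubled-width type, now legalize to the same four instructions as their test_fixed_* counterparts.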
@@ -445,37 +324,37 @@ define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: psrld $1, %xmm3
-; SSE-NEXT: psubd %xmm3, %xmm4
-; SSE-NEXT: psrld $1, %xmm2
-; SSE-NEXT: psubd %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -483,7 +362,7 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-LABEL: test_fixed_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -495,130 +374,49 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
}
define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm6, %xmm0
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: paddq %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm7, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE2-NEXT: paddq %xmm1, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: psubq %xmm1, %xmm0
-; SSE2-NEXT: psubq %xmm1, %xmm2
-; SSE2-NEXT: psubq %xmm1, %xmm4
-; SSE2-NEXT: psubq %xmm1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; SSE2-NEXT: movaps %xmm4, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm0, %xmm4
-; SSE4-NEXT: pxor %xmm5, %xmm5
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE4-NEXT: paddq %xmm4, %xmm2
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE4-NEXT: paddq %xmm1, %xmm3
-; SSE4-NEXT: paddq %xmm6, %xmm0
-; SSE4-NEXT: paddq %xmm7, %xmm4
-; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE4-NEXT: psubq %xmm1, %xmm2
-; SSE4-NEXT: psubq %xmm1, %xmm3
-; SSE4-NEXT: psubq %xmm1, %xmm0
-; SSE4-NEXT: psubq %xmm1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; SSE4-NEXT: movaps %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpsubq %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],ymm0[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <8 x i32> %a0 to <8 x i64>
%x1 = zext <8 x i32> %a1 to <8 x i64>
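
Note the AVX1 shape in the 256-bit tests: AVX1 has no 256-bit integer ALU instructions, so the OR and XOR are done in the floating-point domain (vorps/vxorps) on the whole ymm, while the shift and subtract run per 128-bit half, bracketed by vextractf128/vinsertf128. That is still a short, branch-free sequence, unlike the zero-extend-and-widen chains it replaces.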
@@ -632,37 +430,37 @@ define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: test_fixed_v4i64:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm1, %xmm3
-; SSE-NEXT: psrlq $1, %xmm3
-; SSE-NEXT: psubq %xmm3, %xmm4
-; SSE-NEXT: psrlq $1, %xmm2
-; SSE-NEXT: psubq %xmm2, %xmm5
-; SSE-NEXT: movdqa %xmm5, %xmm0
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpsubq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -670,7 +468,7 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-LABEL: test_fixed_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -682,247 +480,49 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
}
define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm4, %rdx
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: movb $1, %sil
-; SSE2-NEXT: addb $-1, %sil
-; SSE2-NEXT: leaq 1(%rcx,%rdx), %rsi
-; SSE2-NEXT: adcq %rdx, %rcx
-; SSE2-NEXT: setb %dl
-; SSE2-NEXT: movb $1, %cl
-; SSE2-NEXT: addb $-1, %cl
-; SSE2-NEXT: movq %xmm1, %rdi
-; SSE2-NEXT: movq %xmm3, %r8
-; SSE2-NEXT: leaq 1(%rdi,%r8), %rcx
-; SSE2-NEXT: adcq %r8, %rdi
-; SSE2-NEXT: setb %dil
-; SSE2-NEXT: movb $1, %r8b
-; SSE2-NEXT: addb $-1, %r8b
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %r8
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: leaq 1(%r8,%r9), %r10
-; SSE2-NEXT: adcq %r9, %r8
-; SSE2-NEXT: setb %r8b
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: movq %xmm2, %r9
-; SSE2-NEXT: leaq 1(%rax,%r9), %r11
-; SSE2-NEXT: adcq %r9, %rax
-; SSE2-NEXT: setb %al
-; SSE2-NEXT: movzbl %al, %eax
-; SSE2-NEXT: movzbl %r8b, %r8d
-; SSE2-NEXT: movzbl %dil, %edi
-; SSE2-NEXT: movzbl %dl, %edx
-; SSE2-NEXT: shrdq $1, %rax, %r11
-; SSE2-NEXT: shrdq $1, %r8, %r10
-; SSE2-NEXT: shrdq $1, %rdi, %rcx
-; SSE2-NEXT: shrdq $1, %rdx, %rsi
-; SSE2-NEXT: movq %r11, %xmm0
-; SSE2-NEXT: movq %r10, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: movq %rcx, %xmm1
-; SSE2-NEXT: movq %rsi, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movq %xmm1, %rcx
-; SSE4-NEXT: movq %xmm3, %rdx
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: movb $1, %sil
-; SSE4-NEXT: addb $-1, %sil
-; SSE4-NEXT: leaq 1(%rcx,%rdx), %rsi
-; SSE4-NEXT: adcq %rdx, %rcx
-; SSE4-NEXT: setb %dl
-; SSE4-NEXT: movb $1, %cl
-; SSE4-NEXT: addb $-1, %cl
-; SSE4-NEXT: pextrq $1, %xmm1, %rdi
-; SSE4-NEXT: pextrq $1, %xmm3, %r8
-; SSE4-NEXT: leaq 1(%rdi,%r8), %rcx
-; SSE4-NEXT: adcq %r8, %rdi
-; SSE4-NEXT: setb %dil
-; SSE4-NEXT: movb $1, %r8b
-; SSE4-NEXT: addb $-1, %r8b
-; SSE4-NEXT: movq %xmm0, %r8
-; SSE4-NEXT: movq %xmm2, %r9
-; SSE4-NEXT: leaq 1(%r8,%r9), %r10
-; SSE4-NEXT: adcq %r9, %r8
-; SSE4-NEXT: setb %r8b
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: pextrq $1, %xmm0, %rax
-; SSE4-NEXT: pextrq $1, %xmm2, %r9
-; SSE4-NEXT: leaq 1(%rax,%r9), %r11
-; SSE4-NEXT: adcq %r9, %rax
-; SSE4-NEXT: setb %al
-; SSE4-NEXT: movzbl %al, %eax
-; SSE4-NEXT: movzbl %r8b, %r8d
-; SSE4-NEXT: movzbl %dil, %edi
-; SSE4-NEXT: movzbl %dl, %edx
-; SSE4-NEXT: shrdq $1, %rax, %r11
-; SSE4-NEXT: shrdq $1, %r8, %r10
-; SSE4-NEXT: shrdq $1, %rdi, %rcx
-; SSE4-NEXT: shrdq $1, %rdx, %rsi
-; SSE4-NEXT: movq %r11, %xmm1
-; SSE4-NEXT: movq %r10, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE4-NEXT: movq %rcx, %xmm2
-; SSE4-NEXT: movq %rsi, %xmm1
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovq %xmm0, %rcx
-; AVX1-NEXT: vmovq %xmm1, %rdx
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: movb $1, %sil
-; AVX1-NEXT: addb $-1, %sil
-; AVX1-NEXT: leaq 1(%rcx,%rdx), %rsi
-; AVX1-NEXT: adcq %rdx, %rcx
-; AVX1-NEXT: setb %dl
-; AVX1-NEXT: movb $1, %cl
-; AVX1-NEXT: addb $-1, %cl
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX1-NEXT: vpextrq $1, %xmm1, %r8
-; AVX1-NEXT: leaq 1(%rdi,%r8), %rcx
-; AVX1-NEXT: adcq %r8, %rdi
-; AVX1-NEXT: setb %dil
-; AVX1-NEXT: movb $1, %r8b
-; AVX1-NEXT: addb $-1, %r8b
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r8
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vmovq %xmm1, %r9
-; AVX1-NEXT: leaq 1(%r8,%r9), %r10
-; AVX1-NEXT: adcq %r9, %r8
-; AVX1-NEXT: setb %r8b
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: vpextrq $1, %xmm1, %r9
-; AVX1-NEXT: leaq 1(%rax,%r9), %r11
-; AVX1-NEXT: adcq %r9, %rax
-; AVX1-NEXT: setb %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: movzbl %r8b, %r8d
-; AVX1-NEXT: movzbl %dil, %edi
-; AVX1-NEXT: movzbl %dl, %edx
-; AVX1-NEXT: shrdq $1, %rax, %r11
-; AVX1-NEXT: shrdq $1, %r8, %r10
-; AVX1-NEXT: shrdq $1, %rdi, %rcx
-; AVX1-NEXT: shrdq $1, %rdx, %rsi
-; AVX1-NEXT: vmovq %r11, %xmm0
-; AVX1-NEXT: vmovq %r10, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: vmovq %rsi, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovq %xmm0, %rcx
-; AVX2-NEXT: vmovq %xmm1, %rdx
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: movb $1, %sil
-; AVX2-NEXT: addb $-1, %sil
-; AVX2-NEXT: leaq 1(%rcx,%rdx), %rsi
-; AVX2-NEXT: adcq %rdx, %rcx
-; AVX2-NEXT: setb %dl
-; AVX2-NEXT: movb $1, %cl
-; AVX2-NEXT: addb $-1, %cl
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm1, %r8
-; AVX2-NEXT: leaq 1(%rdi,%r8), %rcx
-; AVX2-NEXT: adcq %r8, %rdi
-; AVX2-NEXT: setb %dil
-; AVX2-NEXT: movb $1, %r8b
-; AVX2-NEXT: addb $-1, %r8b
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r8
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vmovq %xmm1, %r9
-; AVX2-NEXT: leaq 1(%r8,%r9), %r10
-; AVX2-NEXT: adcq %r9, %r8
-; AVX2-NEXT: setb %r8b
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vpextrq $1, %xmm1, %r9
-; AVX2-NEXT: leaq 1(%rax,%r9), %r11
-; AVX2-NEXT: adcq %r9, %rax
-; AVX2-NEXT: setb %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: movzbl %r8b, %r8d
-; AVX2-NEXT: movzbl %dil, %edi
-; AVX2-NEXT: movzbl %dl, %edx
-; AVX2-NEXT: shrdq $1, %rax, %r11
-; AVX2-NEXT: shrdq $1, %r8, %r10
-; AVX2-NEXT: shrdq $1, %rdi, %rcx
-; AVX2-NEXT: shrdq $1, %rdx, %rsi
-; AVX2-NEXT: vmovq %r11, %xmm0
-; AVX2-NEXT: vmovq %r10, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vmovq %rsi, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovq %xmm0, %rcx
-; AVX512-NEXT: vmovq %xmm1, %rdx
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: movb $1, %sil
-; AVX512-NEXT: addb $-1, %sil
-; AVX512-NEXT: leaq 1(%rcx,%rdx), %rsi
-; AVX512-NEXT: adcq %rdx, %rcx
-; AVX512-NEXT: setb %dl
-; AVX512-NEXT: movb $1, %cl
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX512-NEXT: vpextrq $1, %xmm1, %r8
-; AVX512-NEXT: addb $-1, %cl
-; AVX512-NEXT: leaq 1(%rdi,%r8), %rcx
-; AVX512-NEXT: adcq %r8, %rdi
-; AVX512-NEXT: setb %dil
-; AVX512-NEXT: movb $1, %r8b
-; AVX512-NEXT: addb $-1, %r8b
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r8
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT: vmovq %xmm1, %r9
-; AVX512-NEXT: leaq 1(%r8,%r9), %r10
-; AVX512-NEXT: adcq %r9, %r8
-; AVX512-NEXT: setb %r8b
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512-NEXT: vpextrq $1, %xmm1, %r9
-; AVX512-NEXT: leaq 1(%rax,%r9), %r11
-; AVX512-NEXT: adcq %r9, %rax
-; AVX512-NEXT: setb %al
-; AVX512-NEXT: movzbl %al, %eax
-; AVX512-NEXT: movzbl %r8b, %r8d
-; AVX512-NEXT: movzbl %dil, %edi
-; AVX512-NEXT: movzbl %dl, %edx
-; AVX512-NEXT: shrdq $1, %rax, %r11
-; AVX512-NEXT: shrdq $1, %r8, %r10
-; AVX512-NEXT: shrdq $1, %rdi, %rcx
-; AVX512-NEXT: shrdq $1, %rdx, %rsi
-; AVX512-NEXT: vmovq %r11, %xmm0
-; AVX512-NEXT: vmovq %r10, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vmovq %rcx, %xmm1
-; AVX512-NEXT: vmovq %rsi, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <4 x i64> %a0 to <4 x i128>
%x1 = zext <4 x i64> %a1 to <4 x i128>
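
For readers puzzling over the deleted sequences: they are the i128 average computed in GPRs. Each lane is moved out with vmovq/vpextrq, leaq 1(a,b) forms the low 64 bits of a + b + 1, the movb $1/addb $-1 pair pre-sets the carry flag so that adcq/setb recover the carry-out (the 65th bit), and shrdq $1 funnels that bit back in while shifting right, before the lanes are rebuilt with vmovq/vpunpcklqdq. The vector form makes all of that, including the six callee-saved register saves in the v8i64 cases further down, unnecessary.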
@@ -1104,69 +704,70 @@ define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: por %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: pxor %xmm0, %xmm4
-; SSE-NEXT: pxor %xmm1, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm6
-; SSE-NEXT: pxor %xmm8, %xmm7
-; SSE-NEXT: psrld $1, %xmm7
-; SSE-NEXT: psubd %xmm7, %xmm3
-; SSE-NEXT: psrld $1, %xmm6
-; SSE-NEXT: psubd %xmm6, %xmm9
-; SSE-NEXT: psrld $1, %xmm5
-; SSE-NEXT: psubd %xmm5, %xmm10
-; SSE-NEXT: psrld $1, %xmm4
-; SSE-NEXT: psubd %xmm4, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubd %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -1178,231 +779,74 @@ define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
}
define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: movdqa %xmm0, %xmm10
-; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm11
-; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
-; SSE2-NEXT: movdqa %xmm8, %xmm12
-; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; SSE2-NEXT: movdqa %xmm3, %xmm13
-; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
-; SSE2-NEXT: paddq %xmm10, %xmm0
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
-; SSE2-NEXT: paddq %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
-; SSE2-NEXT: paddq %xmm11, %xmm1
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
-; SSE2-NEXT: paddq %xmm2, %xmm5
-; SSE2-NEXT: movdqa %xmm6, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
-; SSE2-NEXT: paddq %xmm12, %xmm2
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
-; SSE2-NEXT: paddq %xmm8, %xmm6
-; SSE2-NEXT: movdqa %xmm7, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
-; SSE2-NEXT: paddq %xmm13, %xmm8
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; SSE2-NEXT: paddq %xmm3, %xmm7
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE2-NEXT: psubq %xmm3, %xmm0
-; SSE2-NEXT: psubq %xmm3, %xmm4
-; SSE2-NEXT: psubq %xmm3, %xmm1
-; SSE2-NEXT: psubq %xmm3, %xmm5
-; SSE2-NEXT: psubq %xmm3, %xmm2
-; SSE2-NEXT: psubq %xmm3, %xmm6
-; SSE2-NEXT: psubq %xmm3, %xmm8
-; SSE2-NEXT: psubq %xmm3, %xmm7
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: psrlq $1, %xmm8
-; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm7[0,2]
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE2-NEXT: movaps %xmm8, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm8
-; SSE4-NEXT: movdqa %xmm2, %xmm3
-; SSE4-NEXT: movdqa %xmm1, %xmm2
-; SSE4-NEXT: movdqa %xmm0, %xmm1
-; SSE4-NEXT: pxor %xmm10, %xmm10
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm11 = xmm2[0],zero,xmm2[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm3[0],zero,xmm3[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm13 = xmm8[0],zero,xmm8[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
-; SSE4-NEXT: paddq %xmm1, %xmm4
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm10[2],xmm5[3],xmm10[3]
-; SSE4-NEXT: paddq %xmm2, %xmm5
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
-; SSE4-NEXT: paddq %xmm3, %xmm6
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
-; SSE4-NEXT: paddq %xmm8, %xmm7
-; SSE4-NEXT: paddq %xmm9, %xmm0
-; SSE4-NEXT: paddq %xmm11, %xmm1
-; SSE4-NEXT: paddq %xmm12, %xmm2
-; SSE4-NEXT: paddq %xmm13, %xmm3
-; SSE4-NEXT: pcmpeqd %xmm8, %xmm8
-; SSE4-NEXT: psubq %xmm8, %xmm4
-; SSE4-NEXT: psubq %xmm8, %xmm5
-; SSE4-NEXT: psubq %xmm8, %xmm6
-; SSE4-NEXT: psubq %xmm8, %xmm7
-; SSE4-NEXT: psubq %xmm8, %xmm0
-; SSE4-NEXT: psubq %xmm8, %xmm1
-; SSE4-NEXT: psubq %xmm8, %xmm2
-; SSE4-NEXT: psubq %xmm8, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm7
-; SSE4-NEXT: psrlq $1, %xmm6
-; SSE4-NEXT: psrlq $1, %xmm5
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: psubd %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: psubd %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: psubd %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: psubd %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm8 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm9
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
-; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX1-NEXT: vpaddq %xmm12, %xmm8, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm12
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm12[2],xmm4[2],xmm12[3],xmm4[3]
-; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm11[0],zero,xmm11[1],zero
-; AVX1-NEXT: vpaddq %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm9, %xmm3
-; AVX1-NEXT: vpcmpeqd %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpsubq %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpsubq %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vpsubq %xmm6, %xmm8, %xmm8
-; AVX1-NEXT: vpsubq %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpsubq %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpsubq %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsubq %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm7
-; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm2
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpsubq %ymm2, %ymm4, %ymm3
-; AVX2-NEXT: vpsubq %ymm2, %ymm5, %ymm4
-; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm4[2,3],ymm0[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm4, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm3[2,3],ymm1[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; AVX512-NEXT: vpsubq %zmm1, %zmm2, %zmm2
-; AVX512-NEXT: vpsubq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <16 x i32> %a0 to <16 x i64>
%x1 = zext <16 x i32> %a1 to <16 x i64>
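
The expansion also scales directly with the legal vector width: the SSE version walks four xmm register pairs, AVX2 two ymm pairs, and AVX512 handles the whole <16 x i32> with a single vpord/vpxord/vpsrld/vpsubd sequence, so the cost stays at roughly four instructions per legal register however wide the type is.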
@@ -1416,69 +860,70 @@ define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i64:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: por %xmm7, %xmm3
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: por %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: por %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: por %xmm4, %xmm11
-; SSE-NEXT: pxor %xmm0, %xmm4
-; SSE-NEXT: pxor %xmm1, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm6
-; SSE-NEXT: pxor %xmm8, %xmm7
-; SSE-NEXT: psrlq $1, %xmm7
-; SSE-NEXT: psubq %xmm7, %xmm3
-; SSE-NEXT: psrlq $1, %xmm6
-; SSE-NEXT: psubq %xmm6, %xmm9
-; SSE-NEXT: psrlq $1, %xmm5
-; SSE-NEXT: psubq %xmm5, %xmm10
-; SSE-NEXT: psrlq $1, %xmm4
-; SSE-NEXT: psubq %xmm4, %xmm11
-; SSE-NEXT: movdqa %xmm11, %xmm0
-; SSE-NEXT: movdqa %xmm10, %xmm1
-; SSE-NEXT: movdqa %xmm9, %xmm2
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: psubq %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: psubq %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpsubq %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpsubq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpsubq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpsubq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsubq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpsubq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -1490,601 +935,74 @@ define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
}
define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm8, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
-; SSE2-NEXT: movq %xmm8, %rdx
-; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: movq %rcx, %rax
-; SSE2-NEXT: adcq %rdx, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: movq %xmm3, %r12
-; SSE2-NEXT: movq %xmm7, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %r12, %rax
-; SSE2-NEXT: adcq %rcx, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %r11
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rbx
-; SSE2-NEXT: movq %r11, %rax
-; SSE2-NEXT: adcq %rbx, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: movq %xmm2, %r14
-; SSE2-NEXT: movq %xmm6, %r15
-; SSE2-NEXT: movq %r14, %rax
-; SSE2-NEXT: adcq %r15, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %r13
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %r10
-; SSE2-NEXT: movq %r13, %rax
-; SSE2-NEXT: adcq %r10, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: movq %xmm5, %r8
-; SSE2-NEXT: movq %r9, %rax
-; SSE2-NEXT: adcq %r8, %rax
-; SSE2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE2-NEXT: movb $1, %al
-; SSE2-NEXT: addb $-1, %al
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rdi
-; SSE2-NEXT: movq %xmm2, %rsi
-; SSE2-NEXT: movq %rdi, %rdx
-; SSE2-NEXT: adcq %rsi, %rdx
-; SSE2-NEXT: movb $1, %dl
-; SSE2-NEXT: setb %bpl
-; SSE2-NEXT: addb $-1, %dl
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: movq %xmm4, %rax
-; SSE2-NEXT: movq %rcx, %rdx
-; SSE2-NEXT: adcq %rax, %rdx
-; SSE2-NEXT: leaq 1(%rcx,%rax), %rdx
-; SSE2-NEXT: leaq 1(%rdi,%rsi), %rax
-; SSE2-NEXT: leaq 1(%r9,%r8), %rcx
-; SSE2-NEXT: leaq 1(%r13,%r10), %rdi
-; SSE2-NEXT: leaq 1(%r14,%r15), %rsi
-; SSE2-NEXT: leaq 1(%r11,%rbx), %r11
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE2-NEXT: leaq 1(%r12,%r8), %r9
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE2-NEXT: leaq 1(%r8,%r10), %r10
-; SSE2-NEXT: setb %r8b
-; SSE2-NEXT: movzbl %r8b, %r8d
-; SSE2-NEXT: shrdq $1, %r8, %rdx
-; SSE2-NEXT: movzbl %bpl, %r8d
-; SSE2-NEXT: shrdq $1, %r8, %rax
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %rcx
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %rdi
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %rsi
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %r11
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %r9
-; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r8d # 1-byte Folded Reload
-; SSE2-NEXT: shrdq $1, %r8, %r10
-; SSE2-NEXT: movq %rdx, %xmm0
-; SSE2-NEXT: movq %rax, %xmm4
-; SSE2-NEXT: movq %rcx, %xmm1
-; SSE2-NEXT: movq %rdi, %xmm5
-; SSE2-NEXT: movq %rsi, %xmm2
-; SSE2-NEXT: movq %r11, %xmm6
-; SSE2-NEXT: movq %r9, %xmm3
-; SSE2-NEXT: movq %r10, %xmm7
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: movq %xmm3, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm7, %rdx
-; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: movq %rcx, %rax
-; SSE4-NEXT: adcq %rdx, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: pextrq $1, %xmm3, %r12
-; SSE4-NEXT: pextrq $1, %xmm7, %rbp
-; SSE4-NEXT: movq %r12, %rax
-; SSE4-NEXT: adcq %rbp, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: movq %xmm2, %r11
-; SSE4-NEXT: movq %xmm6, %rbx
-; SSE4-NEXT: movq %r11, %rax
-; SSE4-NEXT: adcq %rbx, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: pextrq $1, %xmm2, %r14
-; SSE4-NEXT: pextrq $1, %xmm6, %r15
-; SSE4-NEXT: movq %r14, %rax
-; SSE4-NEXT: adcq %r15, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: movq %xmm1, %r13
-; SSE4-NEXT: movq %xmm5, %r10
-; SSE4-NEXT: movq %r13, %rax
-; SSE4-NEXT: adcq %r10, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: pextrq $1, %xmm1, %r9
-; SSE4-NEXT: pextrq $1, %xmm5, %r8
-; SSE4-NEXT: movq %r9, %rax
-; SSE4-NEXT: adcq %r8, %rax
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: movb $1, %al
-; SSE4-NEXT: addb $-1, %al
-; SSE4-NEXT: movq %xmm0, %rdi
-; SSE4-NEXT: movq %xmm4, %rsi
-; SSE4-NEXT: movq %rdi, %rdx
-; SSE4-NEXT: adcq %rsi, %rdx
-; SSE4-NEXT: movb $1, %dl
-; SSE4-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; SSE4-NEXT: addb $-1, %dl
-; SSE4-NEXT: pextrq $1, %xmm0, %rcx
-; SSE4-NEXT: pextrq $1, %xmm4, %rax
-; SSE4-NEXT: movq %rcx, %rdx
-; SSE4-NEXT: adcq %rax, %rdx
-; SSE4-NEXT: leaq 1(%rcx,%rax), %rdx
-; SSE4-NEXT: leaq 1(%rdi,%rsi), %rax
-; SSE4-NEXT: leaq 1(%r9,%r8), %rcx
-; SSE4-NEXT: leaq 1(%r13,%r10), %rdi
-; SSE4-NEXT: leaq 1(%r14,%r15), %rsi
-; SSE4-NEXT: leaq 1(%r11,%rbx), %r11
-; SSE4-NEXT: leaq 1(%r12,%rbp), %r8
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; SSE4-NEXT: leaq 1(%r9,%r10), %r9
-; SSE4-NEXT: setb %r10b
-; SSE4-NEXT: movzbl %r10b, %r10d
-; SSE4-NEXT: shrdq $1, %r10, %rdx
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %rax
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %rcx
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %rdi
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %rsi
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %r11
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %r8
-; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; SSE4-NEXT: shrdq $1, %r10, %r9
-; SSE4-NEXT: movq %rdx, %xmm4
-; SSE4-NEXT: movq %rax, %xmm0
-; SSE4-NEXT: movq %rcx, %xmm5
-; SSE4-NEXT: movq %rdi, %xmm1
-; SSE4-NEXT: movq %rsi, %xmm6
-; SSE4-NEXT: movq %r11, %xmm2
-; SSE4-NEXT: movq %r8, %xmm7
-; SSE4-NEXT: movq %r9, %xmm3
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: por %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: psubq %xmm0, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: por %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: psubq %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: psubq %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm3, %xmm6
+; SSE-NEXT: por %xmm7, %xmm6
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: psubq %xmm3, %xmm6
+; SSE-NEXT: movdqa %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: vmovq %xmm1, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vmovq %xmm3, %rdx
-; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: movq %rcx, %rax
-; AVX1-NEXT: adcq %rdx, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vpextrq $1, %xmm1, %r12
-; AVX1-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %r12, %rax
-; AVX1-NEXT: adcq %rcx, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vmovq %xmm1, %r11
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vmovq %xmm3, %rbx
-; AVX1-NEXT: movq %r11, %rax
-; AVX1-NEXT: adcq %rbx, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vpextrq $1, %xmm1, %r14
-; AVX1-NEXT: vpextrq $1, %xmm3, %r15
-; AVX1-NEXT: movq %r14, %rax
-; AVX1-NEXT: adcq %r15, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vmovq %xmm0, %r13
-; AVX1-NEXT: vmovq %xmm2, %r10
-; AVX1-NEXT: movq %r13, %rax
-; AVX1-NEXT: adcq %r10, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vpextrq $1, %xmm0, %r9
-; AVX1-NEXT: vpextrq $1, %xmm2, %r8
-; AVX1-NEXT: movq %r9, %rax
-; AVX1-NEXT: adcq %r8, %rax
-; AVX1-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX1-NEXT: movb $1, %al
-; AVX1-NEXT: addb $-1, %al
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
-; AVX1-NEXT: vmovq %xmm0, %rdi
-; AVX1-NEXT: vmovq %xmm1, %rsi
-; AVX1-NEXT: movq %rdi, %rcx
-; AVX1-NEXT: adcq %rsi, %rcx
-; AVX1-NEXT: movb $1, %cl
-; AVX1-NEXT: setb %bpl
-; AVX1-NEXT: addb $-1, %cl
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: vpextrq $1, %xmm1, %rax
-; AVX1-NEXT: movq %rdx, %rcx
-; AVX1-NEXT: adcq %rax, %rcx
-; AVX1-NEXT: leaq 1(%rdx,%rax), %rcx
-; AVX1-NEXT: leaq 1(%rdi,%rsi), %rax
-; AVX1-NEXT: leaq 1(%r9,%r8), %rdx
-; AVX1-NEXT: leaq 1(%r13,%r10), %rdi
-; AVX1-NEXT: leaq 1(%r14,%r15), %rsi
-; AVX1-NEXT: leaq 1(%r11,%rbx), %r11
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX1-NEXT: leaq 1(%r12,%r8), %r9
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX1-NEXT: leaq 1(%r8,%r10), %r8
-; AVX1-NEXT: setb %r10b
-; AVX1-NEXT: movzbl %r10b, %r10d
-; AVX1-NEXT: shrdq $1, %r10, %rcx
-; AVX1-NEXT: movzbl %bpl, %r10d
-; AVX1-NEXT: shrdq $1, %r10, %rax
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %rdx
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %rdi
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %rsi
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %r11
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %r9
-; AVX1-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX1-NEXT: shrdq $1, %r10, %r8
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: vmovq %rax, %xmm1
-; AVX1-NEXT: vmovq %rdx, %xmm2
-; AVX1-NEXT: vmovq %rdi, %xmm3
-; AVX1-NEXT: vmovq %rsi, %xmm4
-; AVX1-NEXT: vmovq %r11, %xmm5
-; AVX1-NEXT: vmovq %r9, %xmm6
-; AVX1-NEXT: vmovq %r8, %xmm7
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpsubq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpsubq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vmovq %xmm1, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm3, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: movq %rcx, %rax
-; AVX2-NEXT: adcq %rdx, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vpextrq $1, %xmm1, %r12
-; AVX2-NEXT: vpextrq $1, %xmm3, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %r12, %rax
-; AVX2-NEXT: adcq %rcx, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vmovq %xmm1, %r11
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vmovq %xmm3, %rbx
-; AVX2-NEXT: movq %r11, %rax
-; AVX2-NEXT: adcq %rbx, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vpextrq $1, %xmm1, %r14
-; AVX2-NEXT: vpextrq $1, %xmm3, %r15
-; AVX2-NEXT: movq %r14, %rax
-; AVX2-NEXT: adcq %r15, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vmovq %xmm0, %r13
-; AVX2-NEXT: vmovq %xmm2, %r10
-; AVX2-NEXT: movq %r13, %rax
-; AVX2-NEXT: adcq %r10, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vpextrq $1, %xmm0, %r9
-; AVX2-NEXT: vpextrq $1, %xmm2, %r8
-; AVX2-NEXT: movq %r9, %rax
-; AVX2-NEXT: adcq %r8, %rax
-; AVX2-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX2-NEXT: movb $1, %al
-; AVX2-NEXT: addb $-1, %al
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm1
-; AVX2-NEXT: vmovq %xmm0, %rdi
-; AVX2-NEXT: vmovq %xmm1, %rsi
-; AVX2-NEXT: movq %rdi, %rcx
-; AVX2-NEXT: adcq %rsi, %rcx
-; AVX2-NEXT: movb $1, %cl
-; AVX2-NEXT: setb %bpl
-; AVX2-NEXT: addb $-1, %cl
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-NEXT: movq %rdx, %rcx
-; AVX2-NEXT: adcq %rax, %rcx
-; AVX2-NEXT: leaq 1(%rdx,%rax), %rcx
-; AVX2-NEXT: leaq 1(%rdi,%rsi), %rax
-; AVX2-NEXT: leaq 1(%r9,%r8), %rdx
-; AVX2-NEXT: leaq 1(%r13,%r10), %rdi
-; AVX2-NEXT: leaq 1(%r14,%r15), %rsi
-; AVX2-NEXT: leaq 1(%r11,%rbx), %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: leaq 1(%r12,%r8), %r9
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX2-NEXT: leaq 1(%r8,%r10), %r8
-; AVX2-NEXT: setb %r10b
-; AVX2-NEXT: movzbl %r10b, %r10d
-; AVX2-NEXT: shrdq $1, %r10, %rcx
-; AVX2-NEXT: movzbl %bpl, %r10d
-; AVX2-NEXT: shrdq $1, %r10, %rax
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %rdx
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %rdi
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %rsi
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %r11
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %r9
-; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX2-NEXT: shrdq $1, %r10, %r8
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: vmovq %rax, %xmm1
-; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vmovq %rdi, %xmm3
-; AVX2-NEXT: vmovq %rsi, %xmm4
-; AVX2-NEXT: vmovq %r11, %xmm5
-; AVX2-NEXT: vmovq %r9, %xmm6
-; AVX2-NEXT: vmovq %r8, %xmm7
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpsubq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vmovq %xmm0, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vmovq %xmm1, %rdx
-; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: movq %rcx, %rax
-; AVX512-NEXT: adcq %rdx, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: vpextrq $1, %xmm0, %r12
-; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: movq %r12, %rax
-; AVX512-NEXT: adcq %rcx, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %r11
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-NEXT: vmovq %xmm3, %rbx
-; AVX512-NEXT: movq %r11, %rax
-; AVX512-NEXT: adcq %rbx, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vpextrq $1, %xmm2, %r14
-; AVX512-NEXT: vpextrq $1, %xmm3, %r15
-; AVX512-NEXT: movq %r14, %rax
-; AVX512-NEXT: adcq %r15, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vmovq %xmm0, %r13
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512-NEXT: vmovq %xmm1, %r10
-; AVX512-NEXT: movq %r13, %rax
-; AVX512-NEXT: adcq %r10, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: vpextrq $1, %xmm0, %r9
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vpextrq $1, %xmm1, %r8
-; AVX512-NEXT: movq %r9, %rax
-; AVX512-NEXT: adcq %r8, %rax
-; AVX512-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
-; AVX512-NEXT: movb $1, %al
-; AVX512-NEXT: addb $-1, %al
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT: vmovq %xmm0, %rdi
-; AVX512-NEXT: vmovq %xmm1, %rsi
-; AVX512-NEXT: movq %rdi, %rcx
-; AVX512-NEXT: adcq %rsi, %rcx
-; AVX512-NEXT: movb $1, %cl
-; AVX512-NEXT: setb %bpl
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm1, %rax
-; AVX512-NEXT: addb $-1, %cl
-; AVX512-NEXT: movq %rdx, %rcx
-; AVX512-NEXT: adcq %rax, %rcx
-; AVX512-NEXT: leaq 1(%rdx,%rax), %rcx
-; AVX512-NEXT: leaq 1(%rdi,%rsi), %rax
-; AVX512-NEXT: leaq 1(%r9,%r8), %rdx
-; AVX512-NEXT: leaq 1(%r13,%r10), %rdi
-; AVX512-NEXT: leaq 1(%r14,%r15), %rsi
-; AVX512-NEXT: leaq 1(%r11,%rbx), %r11
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: leaq 1(%r12,%r8), %r9
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: leaq 1(%r8,%r10), %r8
-; AVX512-NEXT: setb %r10b
-; AVX512-NEXT: movzbl %r10b, %r10d
-; AVX512-NEXT: shrdq $1, %r10, %rcx
-; AVX512-NEXT: movzbl %bpl, %r10d
-; AVX512-NEXT: shrdq $1, %r10, %rax
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %rdx
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %rdi
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %rsi
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %r11
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %r9
-; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload
-; AVX512-NEXT: shrdq $1, %r10, %r8
-; AVX512-NEXT: vmovq %rcx, %xmm0
-; AVX512-NEXT: vmovq %rax, %xmm1
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vmovq %rdi, %xmm3
-; AVX512-NEXT: vmovq %rsi, %xmm4
-; AVX512-NEXT: vmovq %r11, %xmm5
-; AVX512-NEXT: vmovq %r9, %xmm6
-; AVX512-NEXT: vmovq %r8, %xmm7
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpsubq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <8 x i64> %a0 to <8 x i128>
%x1 = zext <8 x i64> %a1 to <8 x i128>
@@ -2095,3 +1013,6 @@ define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
ret <8 x i64> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SSE2: {{.*}}
+; SSE4: {{.*}}
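
The replacement SSE/AVX sequences above come from the generic TargetLowering::expandAVG path rather than from widening every element to i128. A minimal scalar sketch of the overflow-free ceiling-average identity they implement (an illustration only, not code from the patch):

#include <cstdint>

// avgceilu(a, b) == ceil((a + b) / 2) without the intermediate overflow
// that computing (a + b + 1) / 2 in the element type would incur.
uint64_t avgceilu(uint64_t a, uint64_t b) {
  return (a | b) - ((a ^ b) >> 1);
}

This maps directly onto the por/pxor/psrlq/psubq sequence in the new CHECK lines.
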
diff --git a/llvm/test/CodeGen/X86/avgfloors.ll b/llvm/test/CodeGen/X86/avgfloors.ll
index efee831a15c77..db6f61ed434fd 100644
--- a/llvm/test/CodeGen/X86/avgfloors.ll
+++ b/llvm/test/CodeGen/X86/avgfloors.ll
@@ -65,82 +65,52 @@ define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
}
define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm2, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: paddw %xmm3, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: packuswb %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm2
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
-; SSE4-NEXT: paddw %xmm2, %xmm4
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm0
-; SSE4-NEXT: paddw %xmm3, %xmm0
-; SSE4-NEXT: psrlw $1, %xmm4
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm1, %xmm0
-; SSE4-NEXT: pand %xmm1, %xmm4
-; SSE4-NEXT: packuswb %xmm4, %xmm0
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: psubb %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm0
+; AVX512-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%x0 = sext <16 x i8> %a0 to <16 x i16>
%x1 = sext <16 x i8> %a1 to <16 x i16>
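
For the signed floor cases the expansion is avgfloors(a, b) = (a & b) + ashr(a ^ b, 1). x86 has no vector arithmetic byte shift, so the byte tests above shift logically in 16-bit lanes, mask back to 7 bits, and sign-extend with the xor/sub-by-64 trick. A scalar sketch of the same steps (illustration only, not taken from the patch):

#include <cstdint>

int8_t avgfloors(int8_t a, int8_t b) {
  // Logical shift; the 0x7f mask is redundant on a true 8-bit value but
  // required in the vector form, where psrlw shifts whole 16-bit lanes.
  uint8_t t = (uint8_t)((uint8_t)(a ^ b) >> 1) & 0x7f;
  t = (uint8_t)((t ^ 0x40) - 0x40);      // sign-extend from bit 6
  return (int8_t)((uint8_t)(a & b) + t); // (a & b) + ashr(a ^ b, 1)
}
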
@@ -163,7 +133,7 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; AVX-LABEL: test_fixed_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -175,82 +145,22 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
}
define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm2, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: pslld $15, %xmm4
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm2
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
-; SSE4-NEXT: paddd %xmm2, %xmm4
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm0
-; SSE4-NEXT: paddd %xmm3, %xmm0
-; SSE4-NEXT: psrld $1, %xmm4
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm1, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
-; SSE4-NEXT: packusdw %xmm4, %xmm0
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v8i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = sext <8 x i16> %a0 to <8 x i32>
%x1 = sext <8 x i16> %a1 to <8 x i32>
%sum = add <8 x i32> %x0, %x1
@@ -272,7 +182,7 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; AVX-LABEL: test_fixed_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -284,79 +194,22 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
}
define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
-; SSE2-NEXT: paddq %xmm4, %xmm5
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm0
-; SSE4-NEXT: paddq %xmm2, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm1
-; SSE4-NEXT: paddq %xmm3, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[0,2]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX512-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovqd %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = sext <4 x i32> %a0 to <4 x i64>
%x1 = sext <4 x i32> %a1 to <4 x i64>
%sum = add <4 x i64> %x0, %x1
@@ -369,52 +222,52 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pand %xmm1, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
; SSE2-NEXT: psrad $1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: pand %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v2i64:
; SSE4: # %bb.0:
; SSE4-NEXT: movdqa %xmm0, %xmm2
-; SSE4-NEXT: pand %xmm1, %xmm2
-; SSE4-NEXT: pxor %xmm1, %xmm0
-; SSE4-NEXT: movdqa %xmm0, %xmm1
-; SSE4-NEXT: psrad $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; SSE4-NEXT: pxor %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: psrad $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE4-NEXT: pand %xmm1, %xmm0
; SSE4-NEXT: paddq %xmm2, %xmm0
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vpxor %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX2-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; AVX2-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX2-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v2i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX512-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
; AVX512-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
@@ -428,80 +281,56 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v2i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %rdx, %rsi
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %rdi, %r8
-; SSE2-NEXT: sarq $63, %r8
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: movq %r9, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: addq %r9, %rdx
-; SSE2-NEXT: adcq %rsi, %r10
-; SSE2-NEXT: addq %rdi, %rax
-; SSE2-NEXT: adcq %rcx, %r8
-; SSE2-NEXT: shldq $63, %rax, %r8
-; SSE2-NEXT: shldq $63, %rdx, %r10
-; SSE2-NEXT: movq %r10, %xmm0
-; SSE2-NEXT: movq %r8, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE2-NEXT: pand %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v2i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: movq %xmm0, %rax
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: pextrq $1, %xmm0, %rdx
-; SSE4-NEXT: movq %rdx, %rsi
-; SSE4-NEXT: sarq $63, %rsi
-; SSE4-NEXT: movq %xmm1, %rdi
-; SSE4-NEXT: movq %rdi, %r8
-; SSE4-NEXT: sarq $63, %r8
-; SSE4-NEXT: pextrq $1, %xmm1, %r9
-; SSE4-NEXT: movq %r9, %r10
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: addq %r9, %rdx
-; SSE4-NEXT: adcq %rsi, %r10
-; SSE4-NEXT: addq %rdi, %rax
-; SSE4-NEXT: adcq %rcx, %r8
-; SSE4-NEXT: shldq $63, %rax, %r8
-; SSE4-NEXT: shldq $63, %rdx, %r10
-; SSE4-NEXT: movq %r10, %xmm1
-; SSE4-NEXT: movq %r8, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE4-NEXT: movdqa %xmm0, %xmm2
+; SSE4-NEXT: pxor %xmm1, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm3
+; SSE4-NEXT: psrad $1, %xmm3
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; SSE4-NEXT: pand %xmm1, %xmm0
+; SSE4-NEXT: paddq %xmm2, %xmm0
; SSE4-NEXT: retq
;
-; AVX-LABEL: test_ext_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: movq %rax, %rcx
-; AVX-NEXT: sarq $63, %rcx
-; AVX-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX-NEXT: movq %rdx, %rsi
-; AVX-NEXT: sarq $63, %rsi
-; AVX-NEXT: vmovq %xmm1, %rdi
-; AVX-NEXT: movq %rdi, %r8
-; AVX-NEXT: sarq $63, %r8
-; AVX-NEXT: vpextrq $1, %xmm1, %r9
-; AVX-NEXT: movq %r9, %r10
-; AVX-NEXT: sarq $63, %r10
-; AVX-NEXT: addq %r9, %rdx
-; AVX-NEXT: adcq %rsi, %r10
-; AVX-NEXT: addq %rdi, %rax
-; AVX-NEXT: adcq %rcx, %r8
-; AVX-NEXT: shldq $63, %rax, %r8
-; AVX-NEXT: shldq $63, %rdx, %r10
-; AVX-NEXT: vmovq %r10, %xmm0
-; AVX-NEXT: vmovq %r8, %xmm1
-; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX-NEXT: retq
+; AVX1-LABEL: test_ext_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_ext_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX2-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ext_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $1, %xmm0, %xmm0
+; AVX512-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: retq
%x0 = sext <2 x i64> %a0 to <2 x i128>
%x1 = sext <2 x i64> %a1 to <2 x i128>
%sum = add <2 x i128> %x0, %x1
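
Without AVX512's vpsraq there is no 64-bit arithmetic right shift, so the new v2i64 sequences synthesize ashr-by-1 from a 32-bit arithmetic shift (for the sign-carrying high dwords) and a 64-bit logical shift (for the low dwords), blended per 32-bit half. An intrinsics sketch of that idiom, assuming SSE4.1 (illustration only):

#include <immintrin.h>

// Per-element i64 arithmetic shift right by 1, mirroring the
// psrad/psrlq/pblendw pattern in the CHECK lines above.
__m128i ashr1_epi64(__m128i v) {
  __m128i hi = _mm_srai_epi32(v, 1);    // high dwords: sign-preserving shift
  __m128i lo = _mm_srli_epi64(v, 1);    // low dwords: bit 32 shifts into bit 31
  return _mm_blend_epi16(lo, hi, 0xCC); // words 2,3,6,7 taken from hi
}
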
@@ -517,45 +346,45 @@ define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i8:
; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm5, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
+; SSE-NEXT: psubb %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: pxor %xmm5, %xmm1
; SSE-NEXT: paddb %xmm4, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: paddb %xmm5, %xmm0
-; SSE-NEXT: psubb %xmm3, %xmm0
-; SSE-NEXT: psubb %xmm3, %xmm1
+; SSE-NEXT: psubb %xmm5, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpsubb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i8:
@@ -588,132 +417,70 @@ define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
}
define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm7
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: paddw %xmm4, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: paddw %xmm5, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm6, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: paddw %xmm7, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm4, %xmm5
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm7
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm4
-; SSE4-NEXT: paddw %xmm5, %xmm4
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm1
-; SSE4-NEXT: paddw %xmm6, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm3
-; SSE4-NEXT: paddw %xmm7, %xmm3
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm0
-; SSE4-NEXT: paddw %xmm8, %xmm0
-; SSE4-NEXT: psrlw $1, %xmm4
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm2, %xmm0
-; SSE4-NEXT: pand %xmm2, %xmm3
-; SSE4-NEXT: packuswb %xmm3, %xmm0
-; SSE4-NEXT: pand %xmm2, %xmm1
-; SSE4-NEXT: pand %xmm2, %xmm4
-; SSE4-NEXT: packuswb %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm5, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
+; SSE-NEXT: psubb %xmm5, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: psubb %xmm5, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubb %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpsubb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm3
-; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} ymm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
+; AVX512-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%x0 = sext <32 x i8> %a0 to <32 x i16>
%x1 = sext <32 x i8> %a1 to <32 x i16>
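
The AVX512 runs fold the 127-mask and the xor-with-64 into a single vpternlogd. Reading the AT&T operands as A = the shifted value (dst), B = the broadcast 64s, C = the 127 mask, the immediate 108 (0x6C) is the 8-entry truth table for f(A, B, C) = (A & C) ^ B; one way to check that encoding (illustration only, not part of the patch):

#include <cstdio>

int main() {
  unsigned imm = 0;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned A = (i >> 2) & 1, B = (i >> 1) & 1, C = i & 1;
    imm |= ((A & C) ^ B) << i; // the fused (x & 127) ^ 64
  }
  std::printf("0x%02X\n", imm); // prints 0x6C, i.e. 108
}
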
@@ -726,35 +493,35 @@ define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: paddw %xmm4, %xmm1
-; SSE-NEXT: psraw $1, %xmm0
-; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -762,7 +529,7 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512-LABEL: test_fixed_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -774,131 +541,47 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
}
define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: paddd %xmm4, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm5, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: paddd %xmm7, %xmm1
-; SSE2-NEXT: pslld $15, %xmm8
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm8, %xmm0
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm2, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm4, %xmm5
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm7
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm4
-; SSE4-NEXT: paddd %xmm5, %xmm4
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm1
-; SSE4-NEXT: paddd %xmm6, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm3
-; SSE4-NEXT: paddd %xmm7, %xmm3
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm0
-; SSE4-NEXT: paddd %xmm8, %xmm0
-; SSE4-NEXT: psrld $1, %xmm4
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm2, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; SSE4-NEXT: packusdw %xmm3, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
-; SSE4-NEXT: packusdw %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm5, %xmm5
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm3
-; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
-; AVX2-NEXT: vpackusdw %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <16 x i16> %a0 to <16 x i32>
%x1 = sext <16 x i16> %a1 to <16 x i32>
@@ -911,35 +594,35 @@ define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrad $1, %xmm1
-; SSE-NEXT: paddd %xmm4, %xmm1
-; SSE-NEXT: psrad $1, %xmm0
-; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -947,7 +630,7 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-LABEL: test_fixed_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
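
One recurring shape worth calling out in the AVX1 checks: vandps/vxorps are available as 256-bit (float-domain) operations, but AVX1 has no 256-bit integer shifts or adds, so the expansion extracts the upper 128-bit lane and performs psrad/paddd per half before vinsertf128 reassembles the result; AVX2 and AVX512 keep the whole computation in a single ymm register.
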
@@ -959,127 +642,47 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
}
define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm8, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm6, %xmm8
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm2, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE2-NEXT: paddq %xmm7, %xmm5
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm8
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm8[0,2]
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm4
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm5
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm1
-; SSE4-NEXT: paddq %xmm4, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm3
-; SSE4-NEXT: paddq %xmm5, %xmm3
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm0
-; SSE4-NEXT: paddq %xmm6, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm2
-; SSE4-NEXT: paddq %xmm7, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
-; AVX1-NEXT: vpaddq %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
-; AVX1-NEXT: vpaddq %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
-; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
-; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <8 x i32> %a0 to <8 x i64>
%x1 = sext <8 x i32> %a1 to <8 x i64>
@@ -1092,77 +695,77 @@ define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm2, %xmm5
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: paddq %xmm4, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm4
; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: paddq %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v4i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm1, %xmm4
-; SSE4-NEXT: pand %xmm3, %xmm4
-; SSE4-NEXT: movdqa %xmm0, %xmm5
-; SSE4-NEXT: pand %xmm2, %xmm5
-; SSE4-NEXT: pxor %xmm2, %xmm0
-; SSE4-NEXT: pxor %xmm3, %xmm1
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm2, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm4, %xmm0
; SSE4-NEXT: movdqa %xmm1, %xmm2
-; SSE4-NEXT: psrad $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE4-NEXT: paddq %xmm4, %xmm1
-; SSE4-NEXT: movdqa %xmm0, %xmm2
-; SSE4-NEXT: psrad $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE4-NEXT: paddq %xmm5, %xmm0
+; SSE4-NEXT: pxor %xmm3, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: pand %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm2, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpsrad $1, %ymm0, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm3
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -1176,305 +779,79 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v4i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm4, %rdx
-; SSE2-NEXT: movq %rdx, %r14
-; SSE2-NEXT: sarq $63, %r14
-; SSE2-NEXT: movq %xmm1, %rcx
-; SSE2-NEXT: movq %rcx, %r10
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rsi
-; SSE2-NEXT: movq %rsi, %r11
-; SSE2-NEXT: sarq $63, %r11
-; SSE2-NEXT: movq %xmm0, %r8
-; SSE2-NEXT: movq %r8, %rbx
-; SSE2-NEXT: sarq $63, %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rdi
-; SSE2-NEXT: movq %xmm3, %r15
-; SSE2-NEXT: movq %r15, %r9
-; SSE2-NEXT: sarq $63, %r9
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r12
-; SSE2-NEXT: movq %r12, %r13
-; SSE2-NEXT: sarq $63, %r13
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, %rbp
-; SSE2-NEXT: sarq $63, %rbp
-; SSE2-NEXT: addq %rax, %r8
-; SSE2-NEXT: adcq %rbx, %rbp
-; SSE2-NEXT: addq %r12, %rsi
-; SSE2-NEXT: adcq %r11, %r13
-; SSE2-NEXT: addq %r15, %rcx
-; SSE2-NEXT: adcq %r10, %r9
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; SSE2-NEXT: adcq %r14, %rdi
-; SSE2-NEXT: shldq $63, %rdx, %rdi
-; SSE2-NEXT: shldq $63, %rcx, %r9
-; SSE2-NEXT: shldq $63, %rsi, %r13
-; SSE2-NEXT: shldq $63, %r8, %rbp
-; SSE2-NEXT: movq %rbp, %xmm0
-; SSE2-NEXT: movq %r13, %xmm2
-; SSE2-NEXT: movq %r9, %xmm1
-; SSE2-NEXT: movq %rdi, %xmm3
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: paddq %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm3, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm4
+; SSE2-NEXT: psrlq $1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; SSE2-NEXT: pand %xmm3, %xmm1
+; SSE2-NEXT: paddq %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v4i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: movq %xmm1, %rdi
-; SSE4-NEXT: movq %rdi, %r14
-; SSE4-NEXT: sarq $63, %r14
-; SSE4-NEXT: pextrq $1, %xmm1, %rcx
-; SSE4-NEXT: movq %rcx, %r10
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: movq %xmm0, %rsi
-; SSE4-NEXT: movq %rsi, %r11
-; SSE4-NEXT: sarq $63, %r11
-; SSE4-NEXT: pextrq $1, %xmm0, %r8
-; SSE4-NEXT: movq %r8, %rbx
-; SSE4-NEXT: sarq $63, %rbx
-; SSE4-NEXT: movq %xmm3, %rdx
-; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rdx
-; SSE4-NEXT: pextrq $1, %xmm3, %r15
-; SSE4-NEXT: movq %r15, %r9
-; SSE4-NEXT: sarq $63, %r9
-; SSE4-NEXT: movq %xmm2, %r12
-; SSE4-NEXT: movq %r12, %r13
-; SSE4-NEXT: sarq $63, %r13
-; SSE4-NEXT: pextrq $1, %xmm2, %rax
-; SSE4-NEXT: movq %rax, %rbp
-; SSE4-NEXT: sarq $63, %rbp
-; SSE4-NEXT: addq %rax, %r8
-; SSE4-NEXT: adcq %rbx, %rbp
-; SSE4-NEXT: addq %r12, %rsi
-; SSE4-NEXT: adcq %r11, %r13
-; SSE4-NEXT: addq %r15, %rcx
-; SSE4-NEXT: adcq %r10, %r9
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
-; SSE4-NEXT: adcq %r14, %rdx
-; SSE4-NEXT: shldq $63, %rdi, %rdx
-; SSE4-NEXT: shldq $63, %rcx, %r9
-; SSE4-NEXT: shldq $63, %rsi, %r13
-; SSE4-NEXT: shldq $63, %r8, %rbp
-; SSE4-NEXT: movq %rbp, %xmm2
-; SSE4-NEXT: movq %r13, %xmm0
-; SSE4-NEXT: movq %r9, %xmm3
-; SSE4-NEXT: movq %rdx, %xmm1
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: movdqa %xmm0, %xmm4
+; SSE4-NEXT: pxor %xmm2, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm2, %xmm0
+; SSE4-NEXT: paddq %xmm4, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm2
+; SSE4-NEXT: pxor %xmm3, %xmm2
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: psrad $1, %xmm4
+; SSE4-NEXT: psrlq $1, %xmm2
+; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE4-NEXT: pand %xmm3, %xmm1
+; SSE4-NEXT: paddq %xmm2, %xmm1
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_ext_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r14
-; AVX1-NEXT: sarq $63, %r14
-; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX1-NEXT: movq %rcx, %r10
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rsi
-; AVX1-NEXT: movq %rsi, %r11
-; AVX1-NEXT: sarq $63, %r11
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX1-NEXT: movq %rdi, %rbx
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vmovq %xmm1, %r8
-; AVX1-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r8
-; AVX1-NEXT: vpextrq $1, %xmm1, %r15
-; AVX1-NEXT: movq %r15, %r9
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r12
-; AVX1-NEXT: movq %r12, %r13
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: movq %rax, %rbp
-; AVX1-NEXT: sarq $63, %rbp
-; AVX1-NEXT: addq %rax, %rdi
-; AVX1-NEXT: adcq %rbx, %rbp
-; AVX1-NEXT: addq %r12, %rsi
-; AVX1-NEXT: adcq %r11, %r13
-; AVX1-NEXT: addq %r15, %rcx
-; AVX1-NEXT: adcq %r10, %r9
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; AVX1-NEXT: adcq %r14, %r8
-; AVX1-NEXT: shldq $63, %rdx, %r8
-; AVX1-NEXT: shldq $63, %rcx, %r9
-; AVX1-NEXT: shldq $63, %rsi, %r13
-; AVX1-NEXT: shldq $63, %rdi, %rbp
-; AVX1-NEXT: vmovq %rbp, %xmm0
-; AVX1-NEXT: vmovq %r13, %xmm1
-; AVX1-NEXT: vmovq %r9, %xmm2
-; AVX1-NEXT: vmovq %r8, %xmm3
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r14
-; AVX2-NEXT: sarq $63, %r14
-; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX2-NEXT: movq %rcx, %r10
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rsi
-; AVX2-NEXT: movq %rsi, %r11
-; AVX2-NEXT: sarq $63, %r11
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX2-NEXT: movq %rdi, %rbx
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vmovq %xmm1, %r8
-; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r8
-; AVX2-NEXT: vpextrq $1, %xmm1, %r15
-; AVX2-NEXT: movq %r15, %r9
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r12
-; AVX2-NEXT: movq %r12, %r13
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: movq %rax, %rbp
-; AVX2-NEXT: sarq $63, %rbp
-; AVX2-NEXT: addq %rax, %rdi
-; AVX2-NEXT: adcq %rbx, %rbp
-; AVX2-NEXT: addq %r12, %rsi
-; AVX2-NEXT: adcq %r11, %r13
-; AVX2-NEXT: addq %r15, %rcx
-; AVX2-NEXT: adcq %r10, %r9
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; AVX2-NEXT: adcq %r14, %r8
-; AVX2-NEXT: shldq $63, %rdx, %r8
-; AVX2-NEXT: shldq $63, %rcx, %r9
-; AVX2-NEXT: shldq $63, %rsi, %r13
-; AVX2-NEXT: shldq $63, %rdi, %rbp
-; AVX2-NEXT: vmovq %rbp, %xmm0
-; AVX2-NEXT: vmovq %r13, %xmm1
-; AVX2-NEXT: vmovq %r9, %xmm2
-; AVX2-NEXT: vmovq %r8, %xmm3
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm3
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: movq %rdx, %r14
-; AVX512-NEXT: sarq $63, %r14
-; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX512-NEXT: movq %rcx, %r10
-; AVX512-NEXT: sarq $63, %r10
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: movq %rsi, %r11
-; AVX512-NEXT: sarq $63, %r11
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX512-NEXT: movq %rdi, %rbx
-; AVX512-NEXT: sarq $63, %rbx
-; AVX512-NEXT: vmovq %xmm1, %r8
-; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r8
-; AVX512-NEXT: vpextrq $1, %xmm1, %r15
-; AVX512-NEXT: movq %r15, %r9
-; AVX512-NEXT: sarq $63, %r9
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r12
-; AVX512-NEXT: movq %r12, %r13
-; AVX512-NEXT: sarq $63, %r13
-; AVX512-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512-NEXT: movq %rax, %rbp
-; AVX512-NEXT: sarq $63, %rbp
-; AVX512-NEXT: addq %rax, %rdi
-; AVX512-NEXT: adcq %rbx, %rbp
-; AVX512-NEXT: addq %r12, %rsi
-; AVX512-NEXT: adcq %r11, %r13
-; AVX512-NEXT: addq %r15, %rcx
-; AVX512-NEXT: adcq %r10, %r9
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
-; AVX512-NEXT: adcq %r14, %r8
-; AVX512-NEXT: shldq $63, %rdx, %r8
-; AVX512-NEXT: shldq $63, %rcx, %r9
-; AVX512-NEXT: shldq $63, %rsi, %r13
-; AVX512-NEXT: shldq $63, %rdi, %rbp
-; AVX512-NEXT: vmovq %rbp, %xmm0
-; AVX512-NEXT: vmovq %r13, %xmm1
-; AVX512-NEXT: vmovq %r9, %xmm2
-; AVX512-NEXT: vmovq %r8, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsraq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = sext <4 x i64> %a0 to <4 x i128>
%x1 = sext <4 x i64> %a1 to <4 x i128>
@@ -1491,96 +868,96 @@ define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v64i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm10
-; SSE-NEXT: pand %xmm7, %xmm10
-; SSE-NEXT: movdqa %xmm2, %xmm11
-; SSE-NEXT: pand %xmm6, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pand %xmm4, %xmm9
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm9, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm9
; SSE-NEXT: pand %xmm5, %xmm9
-; SSE-NEXT: movdqa %xmm0, %xmm8
-; SSE-NEXT: pand %xmm4, %xmm8
-; SSE-NEXT: pxor %xmm4, %xmm0
; SSE-NEXT: pxor %xmm5, %xmm1
-; SSE-NEXT: pxor %xmm6, %xmm2
-; SSE-NEXT: pxor %xmm7, %xmm3
-; SSE-NEXT: psrlw $1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; SSE-NEXT: pand %xmm5, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; SSE-NEXT: pxor %xmm4, %xmm3
-; SSE-NEXT: paddb %xmm10, %xmm3
-; SSE-NEXT: psrlw $1, %xmm2
-; SSE-NEXT: pand %xmm5, %xmm2
-; SSE-NEXT: pxor %xmm4, %xmm2
-; SSE-NEXT: paddb %xmm11, %xmm2
; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: pand %xmm5, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
; SSE-NEXT: pxor %xmm4, %xmm1
; SSE-NEXT: paddb %xmm9, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand %xmm5, %xmm0
-; SSE-NEXT: pxor %xmm4, %xmm0
-; SSE-NEXT: paddb %xmm8, %xmm0
-; SSE-NEXT: psubb %xmm4, %xmm0
; SSE-NEXT: psubb %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm5, %xmm2
; SSE-NEXT: psubb %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm5, %xmm3
; SSE-NEXT: psubb %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm7
-; AVX1-NEXT: vpand %xmm3, %xmm7, %xmm7
-; AVX1-NEXT: vpxor %xmm6, %xmm7, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm5, %xmm7, %xmm3
-; AVX1-NEXT: vpsubb %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpsubb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsubb %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsubb %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm3 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
-; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm5, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v64i8:
@@ -1601,240 +978,109 @@ define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
}
define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3],xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
-; SSE2-NEXT: psraw $8, %xmm13
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm3[8],xmm14[9],xmm3[9],xmm14[10],xmm3[10],xmm14[11],xmm3[11],xmm14[12],xmm3[12],xmm14[13],xmm3[13],xmm14[14],xmm3[14],xmm14[15],xmm3[15]
-; SSE2-NEXT: psraw $8, %xmm14
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3],xmm15[4],xmm2[4],xmm15[5],xmm2[5],xmm15[6],xmm2[6],xmm15[7],xmm2[7]
-; SSE2-NEXT: psraw $8, %xmm15
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15]
-; SSE2-NEXT: psraw $8, %xmm12
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
-; SSE2-NEXT: psraw $8, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm1[8],xmm10[9],xmm1[9],xmm10[10],xmm1[10],xmm10[11],xmm1[11],xmm10[12],xmm1[12],xmm10[13],xmm1[13],xmm10[14],xmm1[14],xmm10[15],xmm1[15]
-; SSE2-NEXT: psraw $8, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: psraw $8, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
-; SSE2-NEXT: psraw $8, %xmm8
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; SSE2-NEXT: psraw $8, %xmm3
-; SSE2-NEXT: paddw %xmm13, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm7
-; SSE2-NEXT: paddw %xmm14, %xmm7
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; SSE2-NEXT: psraw $8, %xmm2
-; SSE2-NEXT: paddw %xmm15, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm6
-; SSE2-NEXT: paddw %xmm12, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE2-NEXT: psraw $8, %xmm1
-; SSE2-NEXT: paddw %xmm11, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: paddw %xmm10, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: paddw %xmm9, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: paddw %xmm8, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm8, %xmm4
-; SSE2-NEXT: pand %xmm8, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: pand %xmm8, %xmm5
-; SSE2-NEXT: pand %xmm8, %xmm1
-; SSE2-NEXT: packuswb %xmm5, %xmm1
-; SSE2-NEXT: pand %xmm8, %xmm6
-; SSE2-NEXT: pand %xmm8, %xmm2
-; SSE2-NEXT: packuswb %xmm6, %xmm2
-; SSE2-NEXT: pand %xmm8, %xmm7
-; SSE2-NEXT: pand %xmm8, %xmm3
-; SSE2-NEXT: packuswb %xmm7, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v64i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm8, %xmm9
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm3, %xmm11
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm2, %xmm13
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm1, %xmm15
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm0
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm8
-; SSE4-NEXT: paddw %xmm9, %xmm8
-; SSE4-NEXT: pmovsxbw %xmm7, %xmm3
-; SSE4-NEXT: paddw %xmm10, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm7
-; SSE4-NEXT: paddw %xmm11, %xmm7
-; SSE4-NEXT: pmovsxbw %xmm6, %xmm2
-; SSE4-NEXT: paddw %xmm12, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm6
-; SSE4-NEXT: paddw %xmm13, %xmm6
-; SSE4-NEXT: pmovsxbw %xmm5, %xmm1
-; SSE4-NEXT: paddw %xmm14, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxbw %xmm0, %xmm5
-; SSE4-NEXT: paddw %xmm15, %xmm5
-; SSE4-NEXT: pmovsxbw %xmm4, %xmm0
-; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE4-NEXT: psrlw $1, %xmm8
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm7
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm6
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm5
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE4-NEXT: pand %xmm4, %xmm0
-; SSE4-NEXT: pand %xmm4, %xmm5
-; SSE4-NEXT: packuswb %xmm5, %xmm0
-; SSE4-NEXT: pand %xmm4, %xmm1
-; SSE4-NEXT: pand %xmm4, %xmm6
-; SSE4-NEXT: packuswb %xmm6, %xmm1
-; SSE4-NEXT: pand %xmm4, %xmm2
-; SSE4-NEXT: pand %xmm4, %xmm7
-; SSE4-NEXT: packuswb %xmm7, %xmm2
-; SSE4-NEXT: pand %xmm4, %xmm3
-; SSE4-NEXT: pand %xmm4, %xmm8
-; SSE4-NEXT: packuswb %xmm8, %xmm3
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm9
+; SSE-NEXT: pand %xmm4, %xmm9
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm8, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm9, %xmm0
+; SSE-NEXT: psubb %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm8, %xmm1
+; SSE-NEXT: pxor %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm9, %xmm1
+; SSE-NEXT: psubb %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm8, %xmm2
+; SSE-NEXT: pxor %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm5, %xmm2
+; SSE-NEXT: psubb %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: pand %xmm8, %xmm3
+; SSE-NEXT: pxor %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm5, %xmm3
+; SSE-NEXT: psubb %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm6, %xmm6
-; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm9, %xmm9
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
-; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm10
-; AVX1-NEXT: vpaddw %xmm5, %xmm10, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm10, %xmm10
-; AVX1-NEXT: vpaddw %xmm6, %xmm10, %xmm6
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
-; AVX1-NEXT: vpaddw %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm7
-; AVX1-NEXT: vpaddw %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxbw %xmm8, %xmm8
-; AVX1-NEXT: vpaddw %xmm8, %xmm9, %xmm8
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm4
-; AVX1-NEXT: vpsrlw $1, %xmm6, %xmm5
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
-; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
-; AVX1-NEXT: vpand %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm6, %xmm8, %xmm6
-; AVX1-NEXT: vpand %xmm3, %xmm8, %xmm3
-; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vpand %xmm1, %xmm8, %xmm1
-; AVX1-NEXT: vpand %xmm5, %xmm8, %xmm3
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm8, %xmm3
-; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
-; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX1-NEXT: vpxor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vpsubb %xmm7, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsubb %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsubb %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm6
-; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm3
-; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm3
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpbroadcastw {{.*#+}} ymm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
-; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpackuswb %ymm0, %ymm3, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm5 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX2-NEXT: vpxor %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpxor %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm4, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovsxbw %ymm2, %zmm2
-; AVX512-NEXT: vpmovsxbw %ymm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovsxbw %ymm3, %zmm3
-; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpbroadcastb {{.*#+}} zmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
+; AVX512-NEXT: vpternlogd $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm0
+; AVX512-NEXT: vpaddb %zmm2, %zmm0, %zmm0
+; AVX512-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; AVX512-NEXT: retq
%x0 = sext <64 x i8> %a0 to <64 x i16>
%x1 = sext <64 x i8> %a1 to <64 x i16>
@@ -1847,66 +1093,66 @@ define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pand %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: paddw %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psraw $1, %xmm3
-; SSE-NEXT: paddw %xmm8, %xmm3
-; SSE-NEXT: psraw $1, %xmm2
-; SSE-NEXT: paddw %xmm9, %xmm2
-; SSE-NEXT: psraw $1, %xmm1
-; SSE-NEXT: paddw %xmm10, %xmm1
-; SSE-NEXT: psraw $1, %xmm0
-; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpaddw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -1918,240 +1164,70 @@ define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
}
define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm15
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm1[0],xmm14[1],xmm1[1],xmm14[2],xmm1[2],xmm14[3],xmm1[3]
-; SSE2-NEXT: psrad $16, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm2[4],xmm13[5],xmm2[5],xmm13[6],xmm2[6],xmm13[7],xmm2[7]
-; SSE2-NEXT: psrad $16, %xmm13
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
-; SSE2-NEXT: psrad $16, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
-; SSE2-NEXT: psrad $16, %xmm11
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: paddd %xmm8, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: paddd %xmm15, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: paddd %xmm3, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: paddd %xmm14, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: paddd %xmm13, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: paddd %xmm12, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm11, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: paddd %xmm10, %xmm3
-; SSE2-NEXT: pslld $15, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm9, %xmm0
-; SSE2-NEXT: pslld $15, %xmm8
-; SSE2-NEXT: psrad $16, %xmm8
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm8, %xmm1
-; SSE2-NEXT: pslld $15, %xmm5
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: pslld $15, %xmm4
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pslld $15, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm4, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm8, %xmm9
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm3, %xmm11
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm2, %xmm13
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm1, %xmm15
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm0
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm8
-; SSE4-NEXT: paddd %xmm9, %xmm8
-; SSE4-NEXT: pmovsxwd %xmm7, %xmm3
-; SSE4-NEXT: paddd %xmm10, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm7
-; SSE4-NEXT: paddd %xmm11, %xmm7
-; SSE4-NEXT: pmovsxwd %xmm6, %xmm2
-; SSE4-NEXT: paddd %xmm12, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm6
-; SSE4-NEXT: paddd %xmm13, %xmm6
-; SSE4-NEXT: pmovsxwd %xmm5, %xmm1
-; SSE4-NEXT: paddd %xmm14, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxwd %xmm0, %xmm5
-; SSE4-NEXT: paddd %xmm15, %xmm5
-; SSE4-NEXT: pmovsxwd %xmm4, %xmm0
-; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE4-NEXT: psrld $1, %xmm8
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: psrld $1, %xmm7
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: psrld $1, %xmm6
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm5
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: pxor %xmm4, %xmm4
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm5, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm6, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm7, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
-; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0],xmm4[1],xmm8[2],xmm4[3],xmm8[4],xmm4[5],xmm8[6],xmm4[7]
-; SSE4-NEXT: packusdw %xmm8, %xmm3
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psraw $1, %xmm0
+; SSE-NEXT: paddw %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psraw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psraw $1, %xmm2
+; SSE-NEXT: paddw %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psraw $1, %xmm3
+; SSE-NEXT: paddw %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm6, %xmm6
-; AVX1-NEXT: vpmovsxwd %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm7, %xmm7
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm9, %xmm9
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
-; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm10
-; AVX1-NEXT: vpaddd %xmm5, %xmm10, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm10, %xmm10
-; AVX1-NEXT: vpaddd %xmm6, %xmm10, %xmm6
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm7
-; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxwd %xmm8, %xmm8
-; AVX1-NEXT: vpaddd %xmm8, %xmm9, %xmm8
-; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm4
-; AVX1-NEXT: vpsrld $1, %xmm6, %xmm5
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
-; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2],xmm8[3],xmm0[4],xmm8[5],xmm0[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2],xmm8[3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4],xmm8[5],xmm6[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1],xmm1[2],xmm8[3],xmm1[4],xmm8[5],xmm1[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4],xmm8[5],xmm5[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2],xmm8[3],xmm2[4],xmm8[5],xmm2[6],xmm8[7]
-; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm6
-; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
-; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3
-; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
-; AVX2-NEXT: vpackusdw %ymm0, %ymm3, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
-; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7],ymm2[8],ymm4[9],ymm2[10],ymm4[11],ymm2[12],ymm4[13],ymm2[14],ymm4[15]
-; AVX2-NEXT: vpackusdw %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsraw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <32 x i16> %a0 to <32 x i32>
%x1 = sext <32 x i16> %a1 to <32 x i32>
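
The new SSE/AVX forms above are the signed floor-average identity that the expansion emits: avgfloors(a, b) = (a & b) + ((a ^ b) >> 1) with an arithmetic shift, so no widening to i32 is needed. A minimal scalar sketch of the identity (hypothetical helper, not from the patch; assumes an arithmetic right shift on signed values, as mainstream compilers provide and C++20 guarantees):

  #include <cassert>
  #include <cstdint>

  // Floor average without widening: the common bits (AND) plus half the
  // differing bits (XOR then arithmetic shift) - the same AND/XOR/shift/ADD
  // steps as the pand/pxor/psraw/paddw sequences checked above.
  int16_t avgfloors(int16_t a, int16_t b) {
    return (a & b) + ((a ^ b) >> 1);
  }

  int main() {
    assert(avgfloors(5, 8) == 6);    // floor(13 / 2)
    assert(avgfloors(-5, 8) == 1);   // floor(3 / 2)
    assert(avgfloors(-5, -8) == -7); // floor(-13 / 2), rounds toward -inf
  }
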
@@ -2164,66 +1240,66 @@ define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pand %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: paddd %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psrad $1, %xmm3
-; SSE-NEXT: paddd %xmm8, %xmm3
-; SSE-NEXT: psrad $1, %xmm2
-; SSE-NEXT: paddd %xmm9, %xmm2
-; SSE-NEXT: psrad $1, %xmm1
-; SSE-NEXT: paddd %xmm10, %xmm1
-; SSE-NEXT: psrad $1, %xmm0
-; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -2235,233 +1311,70 @@ define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
}
define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm3, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm13 = xmm3[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm13, %xmm9
-; SSE2-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm2, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm12, %xmm9
-; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm1, %xmm9
-; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm1[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm9, %xmm9
-; SSE2-NEXT: pcmpgtd %xmm11, %xmm9
-; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm10
-; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: pcmpgtd %xmm9, %xmm10
-; SSE2-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
-; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm14
-; SSE2-NEXT: pshufd {{.*#+}} xmm10 = xmm7[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
-; SSE2-NEXT: paddq %xmm7, %xmm3
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: pcmpgtd %xmm10, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1]
-; SSE2-NEXT: paddq %xmm13, %xmm10
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm13
-; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
-; SSE2-NEXT: paddq %xmm6, %xmm2
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pcmpgtd %xmm7, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE2-NEXT: paddq %xmm12, %xmm7
-; SSE2-NEXT: pxor %xmm12, %xmm12
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm12
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
-; SSE2-NEXT: paddq %xmm5, %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtd %xmm6, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm11, %xmm6
-; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: pcmpgtd %xmm4, %xmm11
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: pcmpgtd %xmm5, %xmm8
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm9, %xmm5
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm10
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm8
-; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm3, %xmm9
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm10
-; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm2, %xmm11
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm12
-; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm1, %xmm13
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm14
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm15
-; SSE4-NEXT: pmovsxdq %xmm7, %xmm3
-; SSE4-NEXT: paddq %xmm8, %xmm3
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm7
-; SSE4-NEXT: paddq %xmm9, %xmm7
-; SSE4-NEXT: pmovsxdq %xmm6, %xmm2
-; SSE4-NEXT: paddq %xmm10, %xmm2
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm6
-; SSE4-NEXT: paddq %xmm11, %xmm6
-; SSE4-NEXT: pmovsxdq %xmm5, %xmm1
-; SSE4-NEXT: paddq %xmm12, %xmm1
-; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm0, %xmm5
-; SSE4-NEXT: paddq %xmm13, %xmm5
-; SSE4-NEXT: pmovsxdq %xmm4, %xmm0
-; SSE4-NEXT: paddq %xmm14, %xmm0
-; SSE4-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; SSE4-NEXT: pmovsxdq %xmm4, %xmm4
-; SSE4-NEXT: paddq %xmm15, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm7
-; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm6
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm5
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrad $1, %xmm0
+; SSE-NEXT: paddd %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrad $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrad $1, %xmm2
+; SSE-NEXT: paddd %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrad $1, %xmm3
+; SSE-NEXT: paddd %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm5, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm6, %xmm6
-; AVX1-NEXT: vpmovsxdq %xmm4, %xmm4
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
-; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm8, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm9, %xmm9
-; AVX1-NEXT: vpmovsxdq %xmm7, %xmm7
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
-; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm10[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
-; AVX1-NEXT: vpaddq %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vpshufd {{.*#+}} xmm11 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm11, %xmm11
-; AVX1-NEXT: vpaddq %xmm6, %xmm11, %xmm6
-; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
-; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm3[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
-; AVX1-NEXT: vpaddq %xmm10, %xmm8, %xmm8
-; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm2[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm10, %xmm10
-; AVX1-NEXT: vpaddq %xmm10, %xmm9, %xmm9
-; AVX1-NEXT: vpmovsxdq %xmm3, %xmm3
-; AVX1-NEXT: vpaddq %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpmovsxdq %xmm2, %xmm2
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm5
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm8, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm3
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
-; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
-; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
-; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
-; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
-; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
-; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vpmovsxdq %xmm2, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpmovsxdq %xmm3, %ymm2
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrad $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrad $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovsxdq %ymm2, %zmm2
-; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovsxdq %ymm3, %zmm3
-; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm1
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrad $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <16 x i32> %a0 to <16 x i64>
%x1 = sext <16 x i32> %a1 to <16 x i64>
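
The test_ext variants now produce the same code as test_fixed: the sext/add/shift/trunc chain is matched to an AVGFLOORS node before legalization and then re-expanded, so the widening disappears entirely. A scalar sketch of why the two forms agree (hypothetical helpers, assuming two's-complement arithmetic):

  #include <cassert>
  #include <cstdint>

  // Reference form mirroring the test_ext_* IR: widen, add, shift, truncate.
  int32_t avg_via_widening(int32_t a, int32_t b) {
    return (int32_t)(((int64_t)a + (int64_t)b) >> 1);
  }

  // In-lane form mirroring the expansion: no wider type required.
  int32_t avg_in_lane(int32_t a, int32_t b) {
    return (a & b) + ((a ^ b) >> 1);
  }

  int main() {
    const int32_t samples[] = {0, 1, -1, 5, -8, INT32_MAX, INT32_MIN};
    for (int32_t a : samples)
      for (int32_t b : samples)
        assert(avg_via_widening(a, b) == avg_in_lane(a, b));
  }
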
@@ -2474,130 +1387,130 @@ define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_fixed_v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm3, %xmm11
-; SSE2-NEXT: pand %xmm7, %xmm11
-; SSE2-NEXT: movdqa %xmm2, %xmm10
-; SSE2-NEXT: pand %xmm6, %xmm10
-; SSE2-NEXT: movdqa %xmm1, %xmm9
-; SSE2-NEXT: pand %xmm5, %xmm9
; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: pand %xmm4, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm4
-; SSE2-NEXT: pxor %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm2, %xmm6
-; SSE2-NEXT: pxor %xmm3, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; SSE2-NEXT: paddq %xmm11, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: paddq %xmm10, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,3,2,3]
-; SSE2-NEXT: psrad $1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: paddq %xmm9, %xmm1
+; SSE2-NEXT: pxor %xmm4, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: paddq %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm8
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
; SSE2-NEXT: psrad $1, %xmm5
; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSE2-NEXT: paddq %xmm8, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: paddq %xmm4, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_fixed_v8i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm10
-; SSE4-NEXT: pand %xmm7, %xmm10
-; SSE4-NEXT: movdqa %xmm2, %xmm11
-; SSE4-NEXT: pand %xmm6, %xmm11
-; SSE4-NEXT: movdqa %xmm1, %xmm9
-; SSE4-NEXT: pand %xmm5, %xmm9
; SSE4-NEXT: movdqa %xmm0, %xmm8
-; SSE4-NEXT: pand %xmm4, %xmm8
-; SSE4-NEXT: pxor %xmm4, %xmm0
-; SSE4-NEXT: pxor %xmm5, %xmm1
-; SSE4-NEXT: pxor %xmm6, %xmm2
-; SSE4-NEXT: pxor %xmm7, %xmm3
-; SSE4-NEXT: movdqa %xmm3, %xmm4
-; SSE4-NEXT: psrad $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; SSE4-NEXT: paddq %xmm10, %xmm3
-; SSE4-NEXT: movdqa %xmm2, %xmm4
-; SSE4-NEXT: psrad $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
-; SSE4-NEXT: paddq %xmm11, %xmm2
-; SSE4-NEXT: movdqa %xmm1, %xmm4
-; SSE4-NEXT: psrad $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
-; SSE4-NEXT: paddq %xmm9, %xmm1
-; SSE4-NEXT: movdqa %xmm0, %xmm4
-; SSE4-NEXT: psrad $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE4-NEXT: pxor %xmm4, %xmm8
+; SSE4-NEXT: movdqa %xmm8, %xmm9
+; SSE4-NEXT: psrad $1, %xmm9
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
+; SSE4-NEXT: pand %xmm4, %xmm0
; SSE4-NEXT: paddq %xmm8, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm8
+; SSE4-NEXT: psrad $1, %xmm8
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3],xmm4[4,5],xmm8[6,7]
+; SSE4-NEXT: pand %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: pxor %xmm6, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: pxor %xmm7, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm7, %xmm3
+; SSE4-NEXT: paddq %xmm4, %xmm3
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrad $1, %xmm1, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm6
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2,3],xmm6[4,5],xmm3[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrad $1, %xmm0, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrad $1, %xmm5, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm1, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vpsrad $1, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpsrad $1, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlq $1, %ymm4, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm4
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7]
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
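
The v8i64 cases stay longer on pre-AVX512 targets because a 64-bit arithmetic shift only exists as vpsraq under AVX512: the psrlq $1 / psrad $1 / blend triples above synthesize `ashr i64 X, 1` from the logical qword shift (correct low dword) and an arithmetic dword shift (correct sign-filled high dword). A scalar sketch of that emulation (hypothetical helper, assuming an arithmetic right shift on signed values):

  #include <cassert>
  #include <cstdint>

  // Arithmetic shift-right-by-one of an i64 built from 32-bit pieces,
  // mirroring the psrlq/psrad/blend sequences in the checks above.
  int64_t sra64_by1(int64_t x) {
    uint32_t lo = (uint32_t)((uint64_t)x >> 1); // psrlq $1: low dword is correct
    int32_t hi = (int32_t)(x >> 32) >> 1;       // psrad $1: sign-filled high dword
    return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
  }

  int main() {
    assert(sra64_by1(13) == 6);
    assert(sra64_by1(-13) == -7);
    assert(sra64_by1(INT64_MIN) == INT64_MIN / 2);
  }
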
@@ -2611,631 +1524,132 @@ define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE2-LABEL: test_ext_v8i64:
; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm8, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %rax, %rcx
-; SSE2-NEXT: sarq $63, %rcx
-; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: movq %xmm1, %rbp
-; SSE2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rbp
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %rbx
-; SSE2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %rbx
-; SSE2-NEXT: movq %xmm0, %r15
-; SSE2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r15
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r10
-; SSE2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r10
-; SSE2-NEXT: movq %xmm7, %r9
-; SSE2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r9
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r12
-; SSE2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r12
-; SSE2-NEXT: movq %xmm6, %r13
-; SSE2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: sarq $63, %r13
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r14
-; SSE2-NEXT: movq %r14, %rsi
-; SSE2-NEXT: sarq $63, %rsi
-; SSE2-NEXT: movq %xmm5, %r11
-; SSE2-NEXT: movq %r11, %rdx
-; SSE2-NEXT: sarq $63, %rdx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r8
-; SSE2-NEXT: movq %r8, %rdi
-; SSE2-NEXT: sarq $63, %rdi
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: movq %rcx, %rax
-; SSE2-NEXT: sarq $63, %rax
-; SSE2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE2-NEXT: adcq %r15, %rax
-; SSE2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE2-NEXT: adcq %rbx, %rdi
-; SSE2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE2-NEXT: adcq %rbp, %rdx
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; SSE2-NEXT: addq %r14, %r15
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; SSE2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; SSE2-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
-; SSE2-NEXT: shldq $63, %rcx, %r10
-; SSE2-NEXT: shldq $63, %r8, %r9
-; SSE2-NEXT: shldq $63, %r11, %r12
-; SSE2-NEXT: shldq $63, %rbx, %r13
-; SSE2-NEXT: shldq $63, %r15, %rsi
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rcx, %rdx
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rcx, %rdi
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rcx, %rax
-; SSE2-NEXT: movq %rax, %xmm0
-; SSE2-NEXT: movq %rdi, %xmm4
-; SSE2-NEXT: movq %rdx, %xmm1
-; SSE2-NEXT: movq %rsi, %xmm5
-; SSE2-NEXT: movq %r13, %xmm2
-; SSE2-NEXT: movq %r12, %xmm6
-; SSE2-NEXT: movq %r9, %xmm3
-; SSE2-NEXT: movq %r10, %xmm7
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE2-NEXT: addq $8, %rsp
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
+; SSE2-NEXT: movdqa %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm4, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm9 = xmm8[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm9
+; SSE2-NEXT: psrlq $1, %xmm8
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm8[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: paddq %xmm8, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: pxor %xmm5, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm8
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
+; SSE2-NEXT: pand %xmm5, %xmm1
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: pxor %xmm6, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm6, %xmm2
+; SSE2-NEXT: paddq %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm7, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,3,2,3]
+; SSE2-NEXT: psrad $1, %xmm5
+; SSE2-NEXT: psrlq $1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; SSE2-NEXT: pand %xmm7, %xmm3
+; SSE2-NEXT: paddq %xmm4, %xmm3
; SSE2-NEXT: retq
;
; SSE4-LABEL: test_ext_v8i64:
; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: pushq %rax
-; SSE4-NEXT: movq %xmm3, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm3, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm2, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm2, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %xmm1, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: movq %rax, %rcx
-; SSE4-NEXT: sarq $63, %rcx
-; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: pextrq $1, %xmm1, %rbp
-; SSE4-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rbp
-; SSE4-NEXT: movq %xmm0, %rbx
-; SSE4-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %rbx
-; SSE4-NEXT: pextrq $1, %xmm0, %r14
-; SSE4-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r14
-; SSE4-NEXT: movq %xmm7, %r10
-; SSE4-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r10
-; SSE4-NEXT: pextrq $1, %xmm7, %r9
-; SSE4-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r9
-; SSE4-NEXT: movq %xmm6, %r15
-; SSE4-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r15
-; SSE4-NEXT: pextrq $1, %xmm6, %r13
-; SSE4-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: sarq $63, %r13
-; SSE4-NEXT: movq %xmm5, %r12
-; SSE4-NEXT: movq %r12, %rsi
-; SSE4-NEXT: sarq $63, %rsi
-; SSE4-NEXT: pextrq $1, %xmm5, %r11
-; SSE4-NEXT: movq %r11, %rdx
-; SSE4-NEXT: sarq $63, %rdx
-; SSE4-NEXT: movq %xmm4, %r8
-; SSE4-NEXT: movq %r8, %rdi
-; SSE4-NEXT: sarq $63, %rdi
-; SSE4-NEXT: pextrq $1, %xmm4, %rcx
-; SSE4-NEXT: movq %rcx, %rax
-; SSE4-NEXT: sarq $63, %rax
-; SSE4-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE4-NEXT: adcq %r14, %rax
-; SSE4-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE4-NEXT: adcq %rbx, %rdi
-; SSE4-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; SSE4-NEXT: adcq %rbp, %rdx
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; SSE4-NEXT: addq %r12, %r14
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; SSE4-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; SSE4-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
-; SSE4-NEXT: shldq $63, %rcx, %r10
-; SSE4-NEXT: shldq $63, %r8, %r9
-; SSE4-NEXT: shldq $63, %r11, %r15
-; SSE4-NEXT: shldq $63, %rbx, %r13
-; SSE4-NEXT: shldq $63, %r14, %rsi
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rcx, %rdx
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rcx, %rdi
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rcx, %rax
-; SSE4-NEXT: movq %rax, %xmm4
-; SSE4-NEXT: movq %rdi, %xmm0
-; SSE4-NEXT: movq %rdx, %xmm5
-; SSE4-NEXT: movq %rsi, %xmm1
-; SSE4-NEXT: movq %r13, %xmm6
-; SSE4-NEXT: movq %r15, %xmm2
-; SSE4-NEXT: movq %r9, %xmm7
-; SSE4-NEXT: movq %r10, %xmm3
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE4-NEXT: addq $8, %rsp
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
+; SSE4-NEXT: movdqa %xmm0, %xmm8
+; SSE4-NEXT: pxor %xmm4, %xmm8
+; SSE4-NEXT: movdqa %xmm8, %xmm9
+; SSE4-NEXT: psrad $1, %xmm9
+; SSE4-NEXT: psrlq $1, %xmm8
+; SSE4-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2,3],xmm8[4,5],xmm9[6,7]
+; SSE4-NEXT: pand %xmm4, %xmm0
+; SSE4-NEXT: paddq %xmm8, %xmm0
+; SSE4-NEXT: movdqa %xmm1, %xmm4
+; SSE4-NEXT: pxor %xmm5, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm8
+; SSE4-NEXT: psrad $1, %xmm8
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2,3],xmm4[4,5],xmm8[6,7]
+; SSE4-NEXT: pand %xmm5, %xmm1
+; SSE4-NEXT: paddq %xmm4, %xmm1
+; SSE4-NEXT: movdqa %xmm2, %xmm4
+; SSE4-NEXT: pxor %xmm6, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm6, %xmm2
+; SSE4-NEXT: paddq %xmm4, %xmm2
+; SSE4-NEXT: movdqa %xmm3, %xmm4
+; SSE4-NEXT: pxor %xmm7, %xmm4
+; SSE4-NEXT: movdqa %xmm4, %xmm5
+; SSE4-NEXT: psrad $1, %xmm5
+; SSE4-NEXT: psrlq $1, %xmm4
+; SSE4-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; SSE4-NEXT: pand %xmm7, %xmm3
+; SSE4-NEXT: paddq %xmm4, %xmm3
; SSE4-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: pushq %rax
-; AVX1-NEXT: vmovq %xmm1, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; AVX1-NEXT: vpextrq $1, %xmm1, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vmovq %xmm1, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrq $1, %xmm1, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: movq %rax, %rcx
-; AVX1-NEXT: sarq $63, %rcx
-; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: vpextrq $1, %xmm0, %rbp
-; AVX1-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %rbp
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rbx
-; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %rbx
-; AVX1-NEXT: vpextrq $1, %xmm0, %r15
-; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r15
-; AVX1-NEXT: vmovq %xmm3, %r9
-; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r9
-; AVX1-NEXT: vpextrq $1, %xmm3, %r10
-; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r10
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r12
-; AVX1-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r12
-; AVX1-NEXT: vpextrq $1, %xmm0, %r13
-; AVX1-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: sarq $63, %r13
-; AVX1-NEXT: vmovq %xmm2, %r14
-; AVX1-NEXT: movq %r14, %rsi
-; AVX1-NEXT: sarq $63, %rsi
-; AVX1-NEXT: vpextrq $1, %xmm2, %r11
-; AVX1-NEXT: movq %r11, %rdx
-; AVX1-NEXT: sarq $63, %rdx
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r8
-; AVX1-NEXT: movq %r8, %rdi
-; AVX1-NEXT: sarq $63, %rdi
-; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX1-NEXT: movq %rcx, %rax
-; AVX1-NEXT: sarq $63, %rax
-; AVX1-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: adcq %r15, %rax
-; AVX1-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: adcq %rbx, %rdi
-; AVX1-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: adcq %rbp, %rdx
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX1-NEXT: addq %r14, %r15
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX1-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX1-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX1-NEXT: shldq $63, %rcx, %r9
-; AVX1-NEXT: shldq $63, %r8, %r10
-; AVX1-NEXT: shldq $63, %r11, %r12
-; AVX1-NEXT: shldq $63, %rbx, %r13
-; AVX1-NEXT: shldq $63, %r15, %rsi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rcx, %rdx
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rcx, %rdi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rcx, %rax
-; AVX1-NEXT: vmovq %rax, %xmm0
-; AVX1-NEXT: vmovq %rdi, %xmm1
-; AVX1-NEXT: vmovq %rdx, %xmm2
-; AVX1-NEXT: vmovq %rsi, %xmm3
-; AVX1-NEXT: vmovq %r13, %xmm4
-; AVX1-NEXT: vmovq %r12, %xmm5
-; AVX1-NEXT: vmovq %r10, %xmm6
-; AVX1-NEXT: vmovq %r9, %xmm7
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: addq $8, %rsp
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrad $1, %xmm5, %xmm6
+; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vpaddq %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrad $1, %xmm4, %xmm5
+; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5],xmm5[6,7]
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrad $1, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: pushq %rax
-; AVX2-NEXT: vmovq %xmm1, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; AVX2-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT: vmovq %xmm1, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: movq %rax, %rcx
-; AVX2-NEXT: sarq $63, %rcx
-; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: vpextrq $1, %xmm0, %rbp
-; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %rbp
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rbx
-; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %rbx
-; AVX2-NEXT: vpextrq $1, %xmm0, %r15
-; AVX2-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r15
-; AVX2-NEXT: vmovq %xmm3, %r9
-; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r9
-; AVX2-NEXT: vpextrq $1, %xmm3, %r10
-; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r10
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r12
-; AVX2-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r12
-; AVX2-NEXT: vpextrq $1, %xmm0, %r13
-; AVX2-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: sarq $63, %r13
-; AVX2-NEXT: vmovq %xmm2, %r14
-; AVX2-NEXT: movq %r14, %rsi
-; AVX2-NEXT: sarq $63, %rsi
-; AVX2-NEXT: vpextrq $1, %xmm2, %r11
-; AVX2-NEXT: movq %r11, %rdx
-; AVX2-NEXT: sarq $63, %rdx
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r8
-; AVX2-NEXT: movq %r8, %rdi
-; AVX2-NEXT: sarq $63, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX2-NEXT: movq %rcx, %rax
-; AVX2-NEXT: sarq $63, %rax
-; AVX2-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: adcq %r15, %rax
-; AVX2-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: adcq %rbx, %rdi
-; AVX2-NEXT: addq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX2-NEXT: adcq %rbp, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX2-NEXT: addq %r14, %r15
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX2-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX2-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX2-NEXT: shldq $63, %rcx, %r9
-; AVX2-NEXT: shldq $63, %r8, %r10
-; AVX2-NEXT: shldq $63, %r11, %r12
-; AVX2-NEXT: shldq $63, %rbx, %r13
-; AVX2-NEXT: shldq $63, %r15, %rsi
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %rdx
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %rdi
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rcx, %rax
-; AVX2-NEXT: vmovq %rax, %xmm0
-; AVX2-NEXT: vmovq %rdi, %xmm1
-; AVX2-NEXT: vmovq %rdx, %xmm2
-; AVX2-NEXT: vmovq %rsi, %xmm3
-; AVX2-NEXT: vmovq %r13, %xmm4
-; AVX2-NEXT: vmovq %r12, %xmm5
-; AVX2-NEXT: vmovq %r10, %xmm6
-; AVX2-NEXT: vmovq %r9, %xmm7
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: addq $8, %rsp
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpsrad $1, %ymm4, %ymm5
+; AVX2-NEXT: vpsrlq $1, %ymm4, %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4],ymm5[5],ymm4[6],ymm5[7]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpsrad $1, %ymm2, %ymm4
+; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3],ymm2[4],ymm4[5],ymm2[6],ymm4[7]
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: pushq %rax
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: movq %rax, %rcx
-; AVX512-NEXT: sarq $63, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: vpextrq $1, %xmm0, %r13
-; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r13
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r14
-; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r14
-; AVX512-NEXT: vpextrq $1, %xmm0, %r15
-; AVX512-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r15
-; AVX512-NEXT: vmovq %xmm1, %r9
-; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r9
-; AVX512-NEXT: vpextrq $1, %xmm1, %r11
-; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r11
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r12
-; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %r12
-; AVX512-NEXT: vpextrq $1, %xmm0, %rbp
-; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: sarq $63, %rbp
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm0
-; AVX512-NEXT: vmovq %xmm0, %rbx
-; AVX512-NEXT: movq %rbx, %rsi
-; AVX512-NEXT: sarq $63, %rsi
-; AVX512-NEXT: vpextrq $1, %xmm0, %r10
-; AVX512-NEXT: movq %r10, %rdx
-; AVX512-NEXT: sarq $63, %rdx
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r8
-; AVX512-NEXT: movq %r8, %rdi
-; AVX512-NEXT: sarq $63, %rdi
-; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX512-NEXT: movq %rcx, %rax
-; AVX512-NEXT: sarq $63, %rax
-; AVX512-NEXT: addq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: adcq %r15, %rax
-; AVX512-NEXT: addq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT: adcq %r14, %rdi
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT: addq %r10, %r15
-; AVX512-NEXT: adcq %r13, %rdx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
-; AVX512-NEXT: addq %rbx, %r14
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX512-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
-; AVX512-NEXT: adcq (%rsp), %r9 # 8-byte Folded Reload
-; AVX512-NEXT: shldq $63, %rcx, %r9
-; AVX512-NEXT: shldq $63, %r8, %r11
-; AVX512-NEXT: shldq $63, %r10, %r12
-; AVX512-NEXT: shldq $63, %rbx, %rbp
-; AVX512-NEXT: shldq $63, %r14, %rsi
-; AVX512-NEXT: shldq $63, %r15, %rdx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: shldq $63, %rcx, %rdi
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT: shldq $63, %rcx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm0
-; AVX512-NEXT: vmovq %rdi, %xmm1
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vmovq %rsi, %xmm3
-; AVX512-NEXT: vmovq %rbp, %xmm4
-; AVX512-NEXT: vmovq %r12, %xmm5
-; AVX512-NEXT: vmovq %r11, %xmm6
-; AVX512-NEXT: vmovq %r9, %xmm7
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: addq $8, %rsp
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsraq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = sext <8 x i64> %a0 to <8 x i128>
%x1 = sext <8 x i64> %a1 to <8 x i128>
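
The signed tests above no longer scalarise through i128: the new output computes the floor-average directly via the identity avgfloors(a, b) = (a & b) + ((a ^ b) >> 1), with an arithmetic shift. A minimal sketch of that expansion as generic IR (the function name is illustrative, not part of the patch):

define <2 x i64> @avgfloors_sketch(<2 x i64> %a, <2 x i64> %b) {
  %and = and <2 x i64> %a, %b                 ; bits common to both inputs
  %xor = xor <2 x i64> %a, %b                 ; bits that differ
  %shr = ashr <2 x i64> %xor, <i64 1, i64 1>  ; halve the differing bits, keeping the sign
  %res = add <2 x i64> %and, %shr
  ret <2 x i64> %res
}

AVX512 can use vpsraq for the arithmetic shift, as in the v8i64 output above; pre-AVX512 targets have no 64-bit arithmetic shift, so the AVX1/AVX2 code emulates it: vpsrad $1 arithmetically shifts the sign-carrying high dwords, vpsrlq $1 logically shifts the full quadwords, and the vpblendw/vpblendd takes the low half of each element from the logical shift and the high half from the arithmetic one.
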
diff --git a/llvm/test/CodeGen/X86/avgflooru.ll b/llvm/test/CodeGen/X86/avgflooru.ll
index 000457c5ab1e6..82a0796b116ee 100644
--- a/llvm/test/CodeGen/X86/avgflooru.ll
+++ b/llvm/test/CodeGen/X86/avgflooru.ll
@@ -54,70 +54,41 @@ define <16 x i8> @test_fixed_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
}
define <16 x i8> @test_ext_v16i8(<16 x i8> %a0, <16 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; SSE2-NEXT: paddw %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT: paddw %xmm1, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: packuswb %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm3, %xmm3
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
-; SSE4-NEXT: paddw %xmm0, %xmm1
-; SSE4-NEXT: paddw %xmm4, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: packuswb %xmm1, %xmm2
-; SSE4-NEXT: movdqa %xmm2, %xmm0
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
%x0 = zext <16 x i8> %a0 to <16 x i16>
%x1 = zext <16 x i8> %a1 to <16 x i16>
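
Every unsigned case in this file reduces to the logical-shift form of the same identity, avgflooru(a, b) = (a & b) + ((a ^ b) >> 1), which is why the zext/add/lshr reference pattern above now compiles to the same code as the fixed-point form. As a sketch in generic IR (illustrative only):

define <4 x i32> @avgflooru_sketch(<4 x i32> %a, <4 x i32> %b) {
  %and = and <4 x i32> %a, %b      ; common bits contribute in full
  %xor = xor <4 x i32> %a, %b      ; differing bits contribute half each
  %shr = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
  %res = add <4 x i32> %and, %shr  ; equals (a + b) / 2 without widening
  ret <4 x i32> %res
}
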
@@ -140,7 +111,7 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; AVX-LABEL: test_fixed_v8i16:
; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -152,73 +123,22 @@ define <8 x i16> @test_fixed_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
}
define <8 x i16> @test_ext_v8i16(<8 x i16> %a0, <8 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; SSE2-NEXT: paddd %xmm3, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: pslld $15, %xmm4
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm4, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm3, %xmm3
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; SSE4-NEXT: paddd %xmm0, %xmm1
-; SSE4-NEXT: paddd %xmm4, %xmm2
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: packusdw %xmm1, %xmm2
-; SSE4-NEXT: movdqa %xmm2, %xmm0
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v8i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v8i16:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovdw %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = zext <8 x i16> %a0 to <8 x i32>
%x1 = zext <8 x i16> %a1 to <8 x i32>
%sum = add <8 x i32> %x0, %x1
@@ -240,7 +160,7 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; AVX-LABEL: test_fixed_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -252,71 +172,22 @@ define <4 x i32> @test_fixed_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
}
define <4 x i32> @test_ext_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; SSE2-NEXT: paddq %xmm3, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: paddq %xmm1, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm4
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm3, %xmm3
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE4-NEXT: paddq %xmm0, %xmm1
-; SSE4-NEXT: paddq %xmm4, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
-; SSE4-NEXT: movaps %xmm2, %xmm0
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v4i32:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovqd %ymm0, %xmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = zext <4 x i32> %a0 to <4 x i64>
%x1 = zext <4 x i32> %a1 to <4 x i64>
%sum = add <4 x i64> %x0, %x1
@@ -338,7 +209,7 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; AVX-LABEL: test_fixed_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vpxor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; AVX-NEXT: retq
@@ -350,102 +221,22 @@ define <2 x i64> @test_fixed_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
}
define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v2i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: movq %xmm0, %rcx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm1, %rsi
-; SSE2-NEXT: xorl %edi, %edi
-; SSE2-NEXT: addq %rcx, %rsi
-; SSE2-NEXT: setb %dil
-; SSE2-NEXT: xorl %ecx, %ecx
-; SSE2-NEXT: addq %rax, %rdx
-; SSE2-NEXT: setb %cl
-; SSE2-NEXT: shldq $63, %rdx, %rcx
-; SSE2-NEXT: shldq $63, %rsi, %rdi
-; SSE2-NEXT: movq %rdi, %xmm0
-; SSE2-NEXT: movq %rcx, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v2i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movq %xmm0, %rax
-; SSE4-NEXT: pextrq $1, %xmm0, %rcx
-; SSE4-NEXT: movq %xmm1, %rdx
-; SSE4-NEXT: pextrq $1, %xmm1, %rsi
-; SSE4-NEXT: xorl %edi, %edi
-; SSE4-NEXT: addq %rcx, %rsi
-; SSE4-NEXT: setb %dil
-; SSE4-NEXT: xorl %ecx, %ecx
-; SSE4-NEXT: addq %rax, %rdx
-; SSE4-NEXT: setb %cl
-; SSE4-NEXT: shldq $63, %rdx, %rcx
-; SSE4-NEXT: shldq $63, %rsi, %rdi
-; SSE4-NEXT: movq %rdi, %xmm1
-; SSE4-NEXT: movq %rcx, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE4-NEXT: retq
-;
-; AVX1-LABEL: test_ext_v2i64:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX1-NEXT: vmovq %xmm1, %rdx
-; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX1-NEXT: xorl %edi, %edi
-; AVX1-NEXT: addq %rcx, %rsi
-; AVX1-NEXT: setb %dil
-; AVX1-NEXT: xorl %ecx, %ecx
-; AVX1-NEXT: addq %rax, %rdx
-; AVX1-NEXT: setb %cl
-; AVX1-NEXT: shldq $63, %rdx, %rcx
-; AVX1-NEXT: shldq $63, %rsi, %rdi
-; AVX1-NEXT: vmovq %rdi, %xmm0
-; AVX1-NEXT: vmovq %rcx, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test_ext_v2i64:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX2-NEXT: vmovq %xmm1, %rdx
-; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX2-NEXT: xorl %edi, %edi
-; AVX2-NEXT: addq %rcx, %rsi
-; AVX2-NEXT: setb %dil
-; AVX2-NEXT: xorl %ecx, %ecx
-; AVX2-NEXT: addq %rax, %rdx
-; AVX2-NEXT: setb %cl
-; AVX2-NEXT: shldq $63, %rdx, %rcx
-; AVX2-NEXT: shldq $63, %rsi, %rdi
-; AVX2-NEXT: vmovq %rdi, %xmm0
-; AVX2-NEXT: vmovq %rcx, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: retq
+; SSE-LABEL: test_ext_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: pand %xmm1, %xmm2
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: retq
;
-; AVX512-LABEL: test_ext_v2i64:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: vpextrq $1, %xmm0, %rcx
-; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX512-NEXT: vmovq %xmm1, %rsi
-; AVX512-NEXT: xorl %edi, %edi
-; AVX512-NEXT: addq %rcx, %rdx
-; AVX512-NEXT: setb %dil
-; AVX512-NEXT: xorl %ecx, %ecx
-; AVX512-NEXT: addq %rax, %rsi
-; AVX512-NEXT: setb %cl
-; AVX512-NEXT: shldq $63, %rsi, %rcx
-; AVX512-NEXT: shldq $63, %rdx, %rdi
-; AVX512-NEXT: vmovq %rdi, %xmm0
-; AVX512-NEXT: vmovq %rcx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: retq
+; AVX-LABEL: test_ext_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT: retq
%x0 = zext <2 x i64> %a0 to <2 x i128>
%x1 = zext <2 x i64> %a1 to <2 x i128>
%sum = add <2 x i128> %x0, %x1
@@ -461,35 +252,35 @@ define <2 x i64> @test_ext_v2i64(<2 x i64> %a0, <2 x i64> %a1) nounwind {
define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i8:
; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
-; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: paddb %xmm4, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand %xmm2, %xmm0
-; SSE-NEXT: paddb %xmm5, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i8:
@@ -517,107 +308,55 @@ define <32 x i8> @test_fixed_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
}
define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: paddw %xmm5, %xmm7
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: paddw %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: paddw %xmm6, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: paddw %xmm2, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm5, %xmm5
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
-; SSE4-NEXT: paddw %xmm1, %xmm3
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; SSE4-NEXT: paddw %xmm0, %xmm2
-; SSE4-NEXT: paddw %xmm6, %xmm4
-; SSE4-NEXT: paddw %xmm7, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm4
-; SSE4-NEXT: packuswb %xmm3, %xmm4
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: packuswb %xmm2, %xmm1
-; SSE4-NEXT: movdqa %xmm1, %xmm0
-; SSE4-NEXT: movdqa %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: paddb %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm2, %xmm1
+; SSE-NEXT: paddb %xmm4, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vpaddw %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
-; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX1-NEXT: vpaddw %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT: vpaddw %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm1
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; AVX512-NEXT: vpaddb %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <32 x i8> %a0 to <32 x i16>
%x1 = zext <32 x i8> %a1 to <32 x i16>
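
Byte elements cost one extra instruction: x86 has no per-byte vector shift, so the i8 lshr in the identity is performed as a word shift plus a 0x7F mask that strips the bit carried across each byte boundary (the psrlw $1 / pand pairs in the output above). The equivalence, written as generic IR for a little-endian target (a hypothetical helper, shown only for illustration):

define <16 x i8> @lshr_bytes_by_one(<16 x i8> %x) {
  ; shift whole words right by one bit
  %w = bitcast <16 x i8> %x to <8 x i16>
  %s = lshr <8 x i16> %w, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  %b = bitcast <8 x i16> %s to <16 x i8>
  ; then clear bit 7 of every byte to drop the bit pulled in from its neighbour
  %r = and <16 x i8> %b, <i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127, i8 127>
  ret <16 x i8> %r
}
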
@@ -630,35 +369,35 @@ define <32 x i8> @test_ext_v32i8(<32 x i8> %a0, <32 x i8> %a1) nounwind {
define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: paddw %xmm4, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: paddw %xmm5, %xmm0
+; SSE-NEXT: paddw %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddw %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -666,7 +405,7 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512-LABEL: test_fixed_v16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -678,111 +417,47 @@ define <16 x i16> @test_fixed_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
}
define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm5, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: pslld $15, %xmm7
-; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm7, %xmm0
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm2, %xmm1
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm5, %xmm5
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE4-NEXT: paddd %xmm1, %xmm3
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; SSE4-NEXT: paddd %xmm0, %xmm2
-; SSE4-NEXT: paddd %xmm6, %xmm4
-; SSE4-NEXT: paddd %xmm7, %xmm1
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: psrld $1, %xmm4
-; SSE4-NEXT: packusdw %xmm3, %xmm4
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: packusdw %xmm2, %xmm1
-; SSE4-NEXT: movdqa %xmm1, %xmm0
-; SSE4-NEXT: movdqa %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-NEXT: vpaddd %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm2, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <16 x i16> %a0 to <16 x i32>
%x1 = zext <16 x i16> %a1 to <16 x i32>
@@ -795,35 +470,35 @@ define <16 x i16> @test_ext_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrld $1, %xmm1
-; SSE-NEXT: paddd %xmm4, %xmm1
-; SSE-NEXT: psrld $1, %xmm0
-; SSE-NEXT: paddd %xmm5, %xmm0
+; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddd %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -831,7 +506,7 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512-LABEL: test_fixed_v8i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -843,108 +518,47 @@ define <8 x i32> @test_fixed_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
}
define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: paddq %xmm5, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-NEXT: paddq %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: paddq %xmm6, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: paddq %xmm2, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,2]
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pxor %xmm5, %xmm5
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm0[0],zero,xmm0[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; SSE4-NEXT: paddq %xmm1, %xmm3
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE4-NEXT: paddq %xmm0, %xmm2
-; SSE4-NEXT: paddq %xmm6, %xmm4
-; SSE4-NEXT: paddq %xmm7, %xmm1
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm3[0,2]
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; SSE4-NEXT: movaps %xmm1, %xmm0
-; SSE4-NEXT: movaps %xmm4, %xmm1
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX1-NEXT: vpaddq %xmm7, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero
-; AVX1-NEXT: vpaddq %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX2-NEXT: vpaddq %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],ymm2[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <8 x i32> %a0 to <8 x i64>
%x1 = zext <8 x i32> %a1 to <8 x i64>
@@ -957,35 +571,35 @@ define <8 x i32> @test_ext_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; SSE-LABEL: test_fixed_v4i64:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm1, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm4
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: pand %xmm2, %xmm5
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
; SSE-NEXT: pxor %xmm3, %xmm1
; SSE-NEXT: psrlq $1, %xmm1
-; SSE-NEXT: paddq %xmm4, %xmm1
-; SSE-NEXT: psrlq $1, %xmm0
-; SSE-NEXT: paddq %xmm5, %xmm0
+; SSE-NEXT: paddq %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
-; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpaddq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX2-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
@@ -993,7 +607,7 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512-LABEL: test_fixed_v4i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
-; AVX512-NEXT: vpxor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
@@ -1005,187 +619,47 @@ define <4 x i64> @test_fixed_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
}
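; NOTE: the regenerated checks above and below all share one shape: the
; flooring average is rewritten as avgflooru(a, b) = (a & b) + ((a ^ b) >> 1),
; which keeps the shared bits whole and halves only the differing bits, so the
; result never needs a wider type. A minimal IR sketch of that identity for
; one case (value names are illustrative, not part of this test file):
;   %com = and <4 x i64> %a0, %a1                            ; shared bits
;   %dif = xor <4 x i64> %a0, %a1                            ; differing bits
;   %shr = lshr <4 x i64> %dif, <i64 1, i64 1, i64 1, i64 1> ; halve them
;   %avg = add <4 x i64> %com, %shr                          ; == avgflooru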
define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v4i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm4, %rdi
-; SSE2-NEXT: movq %xmm1, %r9
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %r10
-; SSE2-NEXT: movq %xmm0, %r11
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r8
-; SSE2-NEXT: movq %xmm3, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorl %ecx, %ecx
-; SSE2-NEXT: addq %r11, %rax
-; SSE2-NEXT: setb %cl
-; SSE2-NEXT: xorl %r11d, %r11d
-; SSE2-NEXT: addq %r10, %rdx
-; SSE2-NEXT: setb %r11b
-; SSE2-NEXT: xorl %r10d, %r10d
-; SSE2-NEXT: addq %r9, %rsi
-; SSE2-NEXT: setb %r10b
-; SSE2-NEXT: xorl %r9d, %r9d
-; SSE2-NEXT: addq %rdi, %r8
-; SSE2-NEXT: setb %r9b
-; SSE2-NEXT: shldq $63, %r8, %r9
-; SSE2-NEXT: shldq $63, %rsi, %r10
-; SSE2-NEXT: shldq $63, %rdx, %r11
-; SSE2-NEXT: shldq $63, %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: movq %r11, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: movq %r10, %xmm1
-; SSE2-NEXT: movq %r9, %xmm2
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v4i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movq %xmm1, %r8
-; SSE4-NEXT: pextrq $1, %xmm1, %r9
-; SSE4-NEXT: movq %xmm0, %r10
-; SSE4-NEXT: pextrq $1, %xmm0, %r11
-; SSE4-NEXT: movq %xmm3, %rdi
-; SSE4-NEXT: pextrq $1, %xmm3, %rsi
-; SSE4-NEXT: movq %xmm2, %rdx
-; SSE4-NEXT: pextrq $1, %xmm2, %rax
-; SSE4-NEXT: xorl %ecx, %ecx
-; SSE4-NEXT: addq %r11, %rax
-; SSE4-NEXT: setb %cl
-; SSE4-NEXT: xorl %r11d, %r11d
-; SSE4-NEXT: addq %r10, %rdx
-; SSE4-NEXT: setb %r11b
-; SSE4-NEXT: xorl %r10d, %r10d
-; SSE4-NEXT: addq %r9, %rsi
-; SSE4-NEXT: setb %r10b
-; SSE4-NEXT: xorl %r9d, %r9d
-; SSE4-NEXT: addq %r8, %rdi
-; SSE4-NEXT: setb %r9b
-; SSE4-NEXT: shldq $63, %rdi, %r9
-; SSE4-NEXT: shldq $63, %rsi, %r10
-; SSE4-NEXT: shldq $63, %rdx, %r11
-; SSE4-NEXT: shldq $63, %rax, %rcx
-; SSE4-NEXT: movq %rcx, %xmm1
-; SSE4-NEXT: movq %r11, %xmm0
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE4-NEXT: movq %r10, %xmm2
-; SSE4-NEXT: movq %r9, %xmm1
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pand %xmm2, %xmm4
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm3, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm2, %xmm1
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovq %xmm0, %rdi
-; AVX1-NEXT: vpextrq $1, %xmm0, %r9
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r10
-; AVX1-NEXT: vpextrq $1, %xmm0, %r11
-; AVX1-NEXT: vmovq %xmm1, %r8
-; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: xorl %ecx, %ecx
-; AVX1-NEXT: addq %r11, %rax
-; AVX1-NEXT: setb %cl
-; AVX1-NEXT: xorl %r11d, %r11d
-; AVX1-NEXT: addq %r10, %rdx
-; AVX1-NEXT: setb %r11b
-; AVX1-NEXT: xorl %r10d, %r10d
-; AVX1-NEXT: addq %r9, %rsi
-; AVX1-NEXT: setb %r10b
-; AVX1-NEXT: xorl %r9d, %r9d
-; AVX1-NEXT: addq %rdi, %r8
-; AVX1-NEXT: setb %r9b
-; AVX1-NEXT: shldq $63, %r8, %r9
-; AVX1-NEXT: shldq $63, %rsi, %r10
-; AVX1-NEXT: shldq $63, %rdx, %r11
-; AVX1-NEXT: shldq $63, %rax, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: vmovq %r11, %xmm1
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vmovq %r10, %xmm1
-; AVX1-NEXT: vmovq %r9, %xmm2
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vxorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovq %xmm0, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm0, %r9
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r10
-; AVX2-NEXT: vpextrq $1, %xmm0, %r11
-; AVX2-NEXT: vmovq %xmm1, %r8
-; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: xorl %ecx, %ecx
-; AVX2-NEXT: addq %r11, %rax
-; AVX2-NEXT: setb %cl
-; AVX2-NEXT: xorl %r11d, %r11d
-; AVX2-NEXT: addq %r10, %rdx
-; AVX2-NEXT: setb %r11b
-; AVX2-NEXT: xorl %r10d, %r10d
-; AVX2-NEXT: addq %r9, %rsi
-; AVX2-NEXT: setb %r10b
-; AVX2-NEXT: xorl %r9d, %r9d
-; AVX2-NEXT: addq %rdi, %r8
-; AVX2-NEXT: setb %r9b
-; AVX2-NEXT: shldq $63, %r8, %r9
-; AVX2-NEXT: shldq $63, %rsi, %r10
-; AVX2-NEXT: shldq $63, %rdx, %r11
-; AVX2-NEXT: shldq $63, %rax, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: vmovq %r11, %xmm1
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vmovq %r10, %xmm1
-; AVX2-NEXT: vmovq %r9, %xmm2
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: vpextrq $1, %xmm0, %r9
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovq %xmm0, %r10
-; AVX512-NEXT: vpextrq $1, %xmm0, %r11
-; AVX512-NEXT: vmovq %xmm1, %rdi
-; AVX512-NEXT: vpextrq $1, %xmm1, %rcx
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512-NEXT: vmovq %xmm0, %r8
-; AVX512-NEXT: xorl %edx, %edx
-; AVX512-NEXT: addq %r11, %rax
-; AVX512-NEXT: setb %dl
-; AVX512-NEXT: xorl %r11d, %r11d
-; AVX512-NEXT: addq %r10, %r8
-; AVX512-NEXT: setb %r11b
-; AVX512-NEXT: xorl %r10d, %r10d
-; AVX512-NEXT: addq %r9, %rcx
-; AVX512-NEXT: setb %r10b
-; AVX512-NEXT: xorl %r9d, %r9d
-; AVX512-NEXT: addq %rsi, %rdi
-; AVX512-NEXT: setb %r9b
-; AVX512-NEXT: shldq $63, %rdi, %r9
-; AVX512-NEXT: shldq $63, %rcx, %r10
-; AVX512-NEXT: shldq $63, %r8, %r11
-; AVX512-NEXT: shldq $63, %rax, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vmovq %r11, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vmovq %r10, %xmm1
-; AVX512-NEXT: vmovq %r9, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm2
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%x0 = zext <4 x i64> %a0 to <4 x i128>
%x1 = zext <4 x i64> %a1 to <4 x i128>
@@ -1202,73 +676,73 @@ define <4 x i64> @test_ext_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; SSE-LABEL: test_fixed_v64i8:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm9
-; SSE-NEXT: pand %xmm7, %xmm9
-; SSE-NEXT: movdqa %xmm2, %xmm10
-; SSE-NEXT: pand %xmm6, %xmm10
-; SSE-NEXT: movdqa %xmm1, %xmm11
-; SSE-NEXT: pand %xmm5, %xmm11
; SSE-NEXT: movdqa %xmm0, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: pand %xmm5, %xmm8
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm5
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; SSE-NEXT: pand %xmm4, %xmm3
-; SSE-NEXT: paddb %xmm9, %xmm3
-; SSE-NEXT: psrlw $1, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm2
-; SSE-NEXT: paddb %xmm10, %xmm2
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: paddb %xmm11, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: paddb %xmm5, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm6
-; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm3
-; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpaddb %xmm6, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddb %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm1, %ymm4, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm5, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v64i8:
@@ -1287,191 +761,84 @@ define <64 x i8> @test_fixed_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
}
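; NOTE: x86 has no per-byte shift, so in the v64i8 checks above the expansion
; shifts whole 16-bit words (psrlw $1) and then masks every byte with 127
; (0x7f) to clear the bit that leaks in from the neighbouring byte; the
; i16/i32/i64 cases shift within the element (psrlw/psrld/psrlq) and need no
; mask.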
define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
-; SSE2-LABEL: test_ext_v64i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm3, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3],xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm12
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3],xmm1[4],xmm8[4],xmm1[5],xmm8[5],xmm1[6],xmm8[6],xmm1[7],xmm8[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm13
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm8[8],xmm13[9],xmm8[9],xmm13[10],xmm8[10],xmm13[11],xmm8[11],xmm13[12],xmm8[12],xmm13[13],xmm8[13],xmm13[14],xmm8[14],xmm13[15],xmm8[15]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
-; SSE2-NEXT: paddw %xmm10, %xmm9
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; SSE2-NEXT: paddw %xmm7, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; SSE2-NEXT: paddw %xmm11, %xmm7
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; SSE2-NEXT: paddw %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15]
-; SSE2-NEXT: paddw %xmm12, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; SSE2-NEXT: paddw %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm8[8],xmm5[9],xmm8[9],xmm5[10],xmm8[10],xmm5[11],xmm8[11],xmm5[12],xmm8[12],xmm5[13],xmm8[13],xmm5[14],xmm8[14],xmm5[15],xmm8[15]
-; SSE2-NEXT: paddw %xmm13, %xmm5
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE2-NEXT: paddw %xmm4, %xmm0
-; SSE2-NEXT: psrlw $1, %xmm9
-; SSE2-NEXT: psrlw $1, %xmm3
-; SSE2-NEXT: packuswb %xmm9, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm7
-; SSE2-NEXT: psrlw $1, %xmm2
-; SSE2-NEXT: packuswb %xmm7, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm6
-; SSE2-NEXT: psrlw $1, %xmm1
-; SSE2-NEXT: packuswb %xmm6, %xmm1
-; SSE2-NEXT: psrlw $1, %xmm5
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: packuswb %xmm5, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v64i8:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm9
-; SSE4-NEXT: movdqa %xmm2, %xmm10
-; SSE4-NEXT: movdqa %xmm1, %xmm11
-; SSE4-NEXT: movdqa %xmm0, %xmm8
-; SSE4-NEXT: pxor %xmm13, %xmm13
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm13[8],xmm9[9],xmm13[9],xmm9[10],xmm13[10],xmm9[11],xmm13[11],xmm9[12],xmm13[12],xmm9[13],xmm13[13],xmm9[14],xmm13[14],xmm9[15],xmm13[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm13[8],xmm10[9],xmm13[9],xmm10[10],xmm13[10],xmm10[11],xmm13[11],xmm10[12],xmm13[12],xmm10[13],xmm13[13],xmm10[14],xmm13[14],xmm10[15],xmm13[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm13[8],xmm11[9],xmm13[9],xmm11[10],xmm13[10],xmm11[11],xmm13[11],xmm11[12],xmm13[12],xmm11[13],xmm13[13],xmm11[14],xmm13[14],xmm11[15],xmm13[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero,xmm8[4],zero,xmm8[5],zero,xmm8[6],zero,xmm8[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm13[8],xmm8[9],xmm13[9],xmm8[10],xmm13[10],xmm8[11],xmm13[11],xmm8[12],xmm13[12],xmm8[13],xmm13[13],xmm8[14],xmm13[14],xmm8[15],xmm13[15]
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero,xmm7[4],zero,xmm7[5],zero,xmm7[6],zero,xmm7[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
-; SSE4-NEXT: paddw %xmm9, %xmm7
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm13[8],xmm6[9],xmm13[9],xmm6[10],xmm13[10],xmm6[11],xmm13[11],xmm6[12],xmm13[12],xmm6[13],xmm13[13],xmm6[14],xmm13[14],xmm6[15],xmm13[15]
-; SSE4-NEXT: paddw %xmm10, %xmm6
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
-; SSE4-NEXT: paddw %xmm11, %xmm5
-; SSE4-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; SSE4-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm13[8],xmm4[9],xmm13[9],xmm4[10],xmm13[10],xmm4[11],xmm13[11],xmm4[12],xmm13[12],xmm4[13],xmm13[13],xmm4[14],xmm13[14],xmm4[15],xmm13[15]
-; SSE4-NEXT: paddw %xmm8, %xmm4
-; SSE4-NEXT: paddw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE4-NEXT: paddw %xmm14, %xmm2
-; SSE4-NEXT: paddw %xmm15, %xmm1
-; SSE4-NEXT: paddw %xmm12, %xmm0
-; SSE4-NEXT: psrlw $1, %xmm7
-; SSE4-NEXT: psrlw $1, %xmm6
-; SSE4-NEXT: psrlw $1, %xmm5
-; SSE4-NEXT: psrlw $1, %xmm4
-; SSE4-NEXT: psrlw $1, %xmm3
-; SSE4-NEXT: packuswb %xmm7, %xmm3
-; SSE4-NEXT: psrlw $1, %xmm2
-; SSE4-NEXT: packuswb %xmm6, %xmm2
-; SSE4-NEXT: psrlw $1, %xmm1
-; SSE4-NEXT: packuswb %xmm5, %xmm1
-; SSE4-NEXT: psrlw $1, %xmm0
-; SSE4-NEXT: packuswb %xmm4, %xmm0
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v64i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; SSE-NEXT: pand %xmm4, %xmm0
+; SSE-NEXT: paddb %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm8
+; SSE-NEXT: pand %xmm5, %xmm8
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: pand %xmm4, %xmm1
+; SSE-NEXT: paddb %xmm8, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm5
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: pand %xmm4, %xmm2
+; SSE-NEXT: paddb %xmm5, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm5
+; SSE-NEXT: pand %xmm7, %xmm5
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: pand %xmm4, %xmm3
+; SSE-NEXT: paddb %xmm5, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v64i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm9[8],xmm4[8],xmm9[9],xmm4[9],xmm9[10],xmm4[10],xmm9[11],xmm4[11],xmm9[12],xmm4[12],xmm9[13],xmm4[13],xmm9[14],xmm4[14],xmm9[15],xmm4[15]
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero,xmm9[4],zero,xmm9[5],zero,xmm9[6],zero,xmm9[7],zero
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm11 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; AVX1-NEXT: vpaddw %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm11[8],xmm4[8],xmm11[9],xmm4[9],xmm11[10],xmm4[10],xmm11[11],xmm4[11],xmm11[12],xmm4[12],xmm11[13],xmm4[13],xmm11[14],xmm4[14],xmm11[15],xmm4[15]
-; AVX1-NEXT: vpaddw %xmm7, %xmm12, %xmm7
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; AVX1-NEXT: vpaddw %xmm12, %xmm8, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm12[8],xmm4[8],xmm12[9],xmm4[9],xmm12[10],xmm4[10],xmm12[11],xmm4[11],xmm12[12],xmm4[12],xmm12[13],xmm4[13],xmm12[14],xmm4[14],xmm12[15],xmm4[15]
-; AVX1-NEXT: vpaddw %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero,xmm11[4],zero,xmm11[5],zero,xmm11[6],zero,xmm11[7],zero
-; AVX1-NEXT: vpaddw %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero,xmm12[4],zero,xmm12[5],zero,xmm12[6],zero,xmm12[7],zero
-; AVX1-NEXT: vpaddw %xmm2, %xmm9, %xmm2
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm6
-; AVX1-NEXT: vpsrlw $1, %xmm8, %xmm7
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpaddb %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v64i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero,xmm4[8],zero,xmm4[9],zero,xmm4[10],zero,xmm4[11],zero,xmm4[12],zero,xmm4[13],zero,xmm4[14],zero,xmm4[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero,xmm5[8],zero,xmm5[9],zero,xmm5[10],zero,xmm5[11],zero,xmm5[12],zero,xmm5[13],zero,xmm5[14],zero,xmm5[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
-; AVX2-NEXT: vpaddw %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT: vpaddw %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT: vpaddw %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm4, %ymm2
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm3, %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpbroadcastb {{.*#+}} ymm2 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm4, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v64i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
-; AVX512-NEXT: vpaddw %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlw $1, %zmm2, %zmm1
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512-NEXT: vpaddb %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <64 x i8> %a0 to <64 x i16>
%x1 = zext <64 x i8> %a1 to <64 x i16>
@@ -1484,66 +851,66 @@ define <64 x i8> @test_ext_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; SSE-LABEL: test_fixed_v32i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pand %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: paddw %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psrlw $1, %xmm3
-; SSE-NEXT: paddw %xmm8, %xmm3
-; SSE-NEXT: psrlw $1, %xmm2
-; SSE-NEXT: paddw %xmm9, %xmm2
-; SSE-NEXT: psrlw $1, %xmm1
-; SSE-NEXT: paddw %xmm10, %xmm1
-; SSE-NEXT: psrlw $1, %xmm0
-; SSE-NEXT: paddw %xmm11, %xmm0
+; SSE-NEXT: paddw %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddw %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddw %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddw %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddw %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpaddw %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddw %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v32i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -1555,199 +922,70 @@ define <32 x i16> @test_fixed_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
}
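; NOTE: the removed test_ext_* checks computed the widened average through
; scalar GPRs: add plus setb to capture the 65th (carry) bit, then
; shldq $63 to funnel that carry back in while shifting the sum right by one.
; The new checks collapse to the same and/xor/shift/add form as the
; test_fixed_* variants, since truncating (zext(a) + zext(b)) >> 1 is
; equal to (a & b) + ((a ^ b) >> 1).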
define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
-; SSE2-LABEL: test_ext_v32i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
-; SSE2-NEXT: movdqa %xmm3, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; SSE2-NEXT: paddd %xmm9, %xmm10
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm5, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; SSE2-NEXT: paddd %xmm11, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; SSE2-NEXT: paddd %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm6, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
-; SSE2-NEXT: paddd %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm7, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; SSE2-NEXT: paddd %xmm13, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; SSE2-NEXT: paddd %xmm7, %xmm3
-; SSE2-NEXT: pslld $15, %xmm10
-; SSE2-NEXT: psrad $16, %xmm10
-; SSE2-NEXT: pslld $15, %xmm0
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: packssdw %xmm10, %xmm0
-; SSE2-NEXT: pslld $15, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: pslld $15, %xmm1
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: packssdw %xmm9, %xmm1
-; SSE2-NEXT: pslld $15, %xmm5
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: pslld $15, %xmm2
-; SSE2-NEXT: psrad $16, %xmm2
-; SSE2-NEXT: packssdw %xmm5, %xmm2
-; SSE2-NEXT: pslld $15, %xmm4
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: pslld $15, %xmm3
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: packssdw %xmm4, %xmm3
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v32i16:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm9
-; SSE4-NEXT: movdqa %xmm2, %xmm10
-; SSE4-NEXT: movdqa %xmm1, %xmm11
-; SSE4-NEXT: movdqa %xmm0, %xmm8
-; SSE4-NEXT: pxor %xmm13, %xmm13
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm13[4],xmm8[5],xmm13[5],xmm8[6],xmm13[6],xmm8[7],xmm13[7]
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
-; SSE4-NEXT: paddd %xmm9, %xmm7
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; SSE4-NEXT: paddd %xmm10, %xmm6
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
-; SSE4-NEXT: paddd %xmm11, %xmm5
-; SSE4-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; SSE4-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
-; SSE4-NEXT: paddd %xmm8, %xmm4
-; SSE4-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE4-NEXT: paddd %xmm14, %xmm2
-; SSE4-NEXT: paddd %xmm15, %xmm1
-; SSE4-NEXT: paddd %xmm12, %xmm0
-; SSE4-NEXT: psrld $1, %xmm7
-; SSE4-NEXT: psrld $1, %xmm6
-; SSE4-NEXT: psrld $1, %xmm5
-; SSE4-NEXT: psrld $1, %xmm4
-; SSE4-NEXT: psrld $1, %xmm3
-; SSE4-NEXT: packusdw %xmm7, %xmm3
-; SSE4-NEXT: psrld $1, %xmm2
-; SSE4-NEXT: packusdw %xmm6, %xmm2
-; SSE4-NEXT: psrld $1, %xmm1
-; SSE4-NEXT: packusdw %xmm5, %xmm1
-; SSE4-NEXT: psrld $1, %xmm0
-; SSE4-NEXT: packusdw %xmm4, %xmm0
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v32i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlw $1, %xmm0
+; SSE-NEXT: paddw %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlw $1, %xmm1
+; SSE-NEXT: paddw %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlw $1, %xmm2
+; SSE-NEXT: paddw %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlw $1, %xmm3
+; SSE-NEXT: paddw %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v32i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm8 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm9
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm10 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm9 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm11 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX1-NEXT: vpaddd %xmm5, %xmm11, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; AVX1-NEXT: vpaddd %xmm7, %xmm12, %xmm7
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX1-NEXT: vpaddd %xmm12, %xmm8, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
-; AVX1-NEXT: vpaddd %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm11[0],zero,xmm11[1],zero,xmm11[2],zero,xmm11[3],zero
-; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
-; AVX1-NEXT: vpaddd %xmm2, %xmm9, %xmm2
-; AVX1-NEXT: vpsrld $1, %xmm5, %xmm5
-; AVX1-NEXT: vpsrld $1, %xmm7, %xmm6
-; AVX1-NEXT: vpsrld $1, %xmm8, %xmm7
-; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
-; AVX1-NEXT: vpackusdw %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v32i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX2-NEXT: vpaddd %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpaddd %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX2-NEXT: vpaddd %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $1, %ymm4, %ymm2
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm3, %ymm2
-; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddw %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v32i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
-; AVX512-NEXT: vpaddd %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrld $1, %zmm2, %zmm1
-; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: vpmovdw %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlw $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <32 x i16> %a0 to <32 x i32>
%x1 = zext <32 x i16> %a1 to <32 x i32>
@@ -1760,66 +998,66 @@ define <32 x i16> @test_ext_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; SSE-LABEL: test_fixed_v16i32:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pand %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: paddd %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psrld $1, %xmm3
-; SSE-NEXT: paddd %xmm8, %xmm3
-; SSE-NEXT: psrld $1, %xmm2
-; SSE-NEXT: paddd %xmm9, %xmm2
-; SSE-NEXT: psrld $1, %xmm1
-; SSE-NEXT: paddd %xmm10, %xmm1
-; SSE-NEXT: psrld $1, %xmm0
-; SSE-NEXT: paddd %xmm11, %xmm0
+; SSE-NEXT: paddd %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrld $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddd %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddd %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v16i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxord %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -1831,193 +1069,70 @@ define <16 x i32> @test_fixed_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
}
define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
-; SSE2-LABEL: test_ext_v16i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm8, %xmm8
-; SSE2-NEXT: movdqa %xmm3, %xmm10
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; SSE2-NEXT: movdqa %xmm2, %xmm11
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm8[2],xmm11[3],xmm8[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm12
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm12 = xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm13
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm13 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
-; SSE2-NEXT: movdqa %xmm7, %xmm9
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; SSE2-NEXT: paddq %xmm10, %xmm9
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm7, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm7
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; SSE2-NEXT: paddq %xmm11, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
-; SSE2-NEXT: paddq %xmm12, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; SSE2-NEXT: paddq %xmm13, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: psrlq $1, %xmm9
-; SSE2-NEXT: psrlq $1, %xmm3
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm9[0,2]
-; SSE2-NEXT: psrlq $1, %xmm7
-; SSE2-NEXT: psrlq $1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm7[0,2]
-; SSE2-NEXT: psrlq $1, %xmm6
-; SSE2-NEXT: psrlq $1, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm6[0,2]
-; SSE2-NEXT: psrlq $1, %xmm5
-; SSE2-NEXT: psrlq $1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v16i32:
-; SSE4: # %bb.0:
-; SSE4-NEXT: movdqa %xmm3, %xmm9
-; SSE4-NEXT: movdqa %xmm2, %xmm10
-; SSE4-NEXT: movdqa %xmm1, %xmm11
-; SSE4-NEXT: movdqa %xmm0, %xmm8
-; SSE4-NEXT: pxor %xmm13, %xmm13
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero
-; SSE4-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm14 = xmm2[0],zero,xmm2[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm10 = xmm10[2],xmm13[2],xmm10[3],xmm13[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm15 = xmm1[0],zero,xmm1[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm11 = xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm12 = xmm8[0],zero,xmm8[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm13[2],xmm8[3],xmm13[3]
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm7[0],zero,xmm7[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm13[2],xmm7[3],xmm13[3]
-; SSE4-NEXT: paddq %xmm9, %xmm7
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
-; SSE4-NEXT: paddq %xmm10, %xmm6
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm13[2],xmm5[3],xmm13[3]
-; SSE4-NEXT: paddq %xmm11, %xmm5
-; SSE4-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero
-; SSE4-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3]
-; SSE4-NEXT: paddq %xmm8, %xmm4
-; SSE4-NEXT: paddq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE4-NEXT: paddq %xmm14, %xmm2
-; SSE4-NEXT: paddq %xmm15, %xmm1
-; SSE4-NEXT: paddq %xmm12, %xmm0
-; SSE4-NEXT: psrlq $1, %xmm7
-; SSE4-NEXT: psrlq $1, %xmm6
-; SSE4-NEXT: psrlq $1, %xmm5
-; SSE4-NEXT: psrlq $1, %xmm4
-; SSE4-NEXT: psrlq $1, %xmm3
-; SSE4-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm7[0,2]
-; SSE4-NEXT: psrlq $1, %xmm2
-; SSE4-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm6[0,2]
-; SSE4-NEXT: psrlq $1, %xmm1
-; SSE4-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
-; SSE4-NEXT: psrlq $1, %xmm0
-; SSE4-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v16i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrld $1, %xmm0
+; SSE-NEXT: paddd %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrld $1, %xmm1
+; SSE-NEXT: paddd %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: paddd %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrld $1, %xmm3
+; SSE-NEXT: paddd %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v16i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm8
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm9 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm10 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm8 = xmm8[0],zero,xmm8[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX1-NEXT: vpaddq %xmm6, %xmm12, %xmm6
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm12 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX1-NEXT: vpaddq %xmm7, %xmm12, %xmm7
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm13 = xmm12[2],xmm5[2],xmm12[3],xmm5[3]
-; AVX1-NEXT: vpaddq %xmm13, %xmm9, %xmm9
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpaddq %xmm5, %xmm10, %xmm5
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm11[0],zero,xmm11[1],zero
-; AVX1-NEXT: vpaddq %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm12[0],zero,xmm12[1],zero
-; AVX1-NEXT: vpaddq %xmm3, %xmm8, %xmm3
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlq $1, %xmm6, %xmm2
-; AVX1-NEXT: vpsrlq $1, %xmm7, %xmm6
-; AVX1-NEXT: vpsrlq $1, %xmm9, %xmm7
-; AVX1-NEXT: vpsrlq $1, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlq $1, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v16i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX2-NEXT: vpaddq %ymm6, %ymm4, %ymm4
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm6
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX2-NEXT: vpaddq %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX2-NEXT: vpaddq %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm5[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm4[2,3]
-; AVX2-NEXT: vpsrlq $1, %ymm2, %ymm2
-; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vshufps {{.*#+}} ymm1 = ymm1[0,2],ymm2[0,2],ymm1[4,6],ymm2[4,6]
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v16i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm3 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero
-; AVX512-NEXT: vpaddq %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpsrlq $1, %zmm2, %zmm1
-; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrld $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <16 x i32> %a0 to <16 x i64>
%x1 = zext <16 x i32> %a1 to <16 x i64>
@@ -2030,66 +1145,66 @@ define <16 x i32> @test_ext_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; SSE-LABEL: test_fixed_v8i64:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pand %xmm7, %xmm8
-; SSE-NEXT: movdqa %xmm2, %xmm9
-; SSE-NEXT: pand %xmm6, %xmm9
-; SSE-NEXT: movdqa %xmm1, %xmm10
-; SSE-NEXT: pand %xmm5, %xmm10
-; SSE-NEXT: movdqa %xmm0, %xmm11
-; SSE-NEXT: pand %xmm4, %xmm11
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: paddq %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
; SSE-NEXT: pxor %xmm7, %xmm3
; SSE-NEXT: psrlq $1, %xmm3
-; SSE-NEXT: paddq %xmm8, %xmm3
-; SSE-NEXT: psrlq $1, %xmm2
-; SSE-NEXT: paddq %xmm9, %xmm2
-; SSE-NEXT: psrlq $1, %xmm1
-; SSE-NEXT: paddq %xmm10, %xmm1
-; SSE-NEXT: psrlq $1, %xmm0
-; SSE-NEXT: paddq %xmm11, %xmm0
+; SSE-NEXT: paddq %xmm4, %xmm3
; SSE-NEXT: retq
;
; AVX1-LABEL: test_fixed_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm4
-; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm5
-; AVX1-NEXT: vxorps %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorps %ymm1, %ymm3, %ymm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
-; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm6
-; AVX1-NEXT: vpaddq %xmm0, %xmm6, %xmm0
-; AVX1-NEXT: vpaddq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
-; AVX1-NEXT: vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddq %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_fixed_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm4
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm5
-; AVX2-NEXT: vpxor %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpxor %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm4, %ymm1
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_fixed_v8i64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
-; AVX512-NEXT: vpxorq %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
@@ -2101,433 +1216,70 @@ define <8 x i64> @test_fixed_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
}
define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
-; SSE2-LABEL: test_ext_v8i64:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %r13
-; SSE2-NEXT: pushq %r12
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %rbx
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
-; SSE2-NEXT: movq %xmm3, %r12
-; SSE2-NEXT: movq %xmm2, %rbp
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE2-NEXT: movq %xmm2, %r13
-; SSE2-NEXT: movq %xmm1, %r15
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; SSE2-NEXT: movq %xmm1, %r14
-; SSE2-NEXT: movq %xmm0, %r11
-; SSE2-NEXT: movq %xmm7, %r10
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %r9
-; SSE2-NEXT: movq %xmm6, %r8
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: movq %xmm5, %rsi
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,2,3]
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm4, %rax
-; SSE2-NEXT: xorl %ecx, %ecx
-; SSE2-NEXT: addq %r11, %rax
-; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: setb %cl
-; SSE2-NEXT: xorl %r11d, %r11d
-; SSE2-NEXT: addq %r14, %rdx
-; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE2-NEXT: setb %r11b
-; SSE2-NEXT: xorl %r14d, %r14d
-; SSE2-NEXT: addq %r15, %rsi
-; SSE2-NEXT: setb %r14b
-; SSE2-NEXT: xorl %r15d, %r15d
-; SSE2-NEXT: addq %r13, %rdi
-; SSE2-NEXT: setb %r15b
-; SSE2-NEXT: xorl %r13d, %r13d
-; SSE2-NEXT: addq %rbp, %r8
-; SSE2-NEXT: setb %r13b
-; SSE2-NEXT: xorl %ebp, %ebp
-; SSE2-NEXT: addq %r12, %r9
-; SSE2-NEXT: setb %bpl
-; SSE2-NEXT: xorl %r12d, %r12d
-; SSE2-NEXT: addq %rbx, %r10
-; SSE2-NEXT: movq %xmm8, %rdx
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,2,3]
-; SSE2-NEXT: setb %r12b
-; SSE2-NEXT: movq %xmm0, %rax
-; SSE2-NEXT: xorl %ebx, %ebx
-; SSE2-NEXT: addq %rdx, %rax
-; SSE2-NEXT: setb %bl
-; SSE2-NEXT: shldq $63, %rax, %rbx
-; SSE2-NEXT: shldq $63, %r10, %r12
-; SSE2-NEXT: shldq $63, %r9, %rbp
-; SSE2-NEXT: shldq $63, %r8, %r13
-; SSE2-NEXT: shldq $63, %rdi, %r15
-; SSE2-NEXT: shldq $63, %rsi, %r14
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rax, %r11
-; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE2-NEXT: shldq $63, %rax, %rcx
-; SSE2-NEXT: movq %rcx, %xmm0
-; SSE2-NEXT: movq %r11, %xmm4
-; SSE2-NEXT: movq %r14, %xmm1
-; SSE2-NEXT: movq %r15, %xmm5
-; SSE2-NEXT: movq %r13, %xmm2
-; SSE2-NEXT: movq %rbp, %xmm6
-; SSE2-NEXT: movq %r12, %xmm3
-; SSE2-NEXT: movq %rbx, %xmm7
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE2-NEXT: popq %rbx
-; SSE2-NEXT: popq %r12
-; SSE2-NEXT: popq %r13
-; SSE2-NEXT: popq %r14
-; SSE2-NEXT: popq %r15
-; SSE2-NEXT: popq %rbp
-; SSE2-NEXT: retq
-;
-; SSE4-LABEL: test_ext_v8i64:
-; SSE4: # %bb.0:
-; SSE4-NEXT: pushq %rbp
-; SSE4-NEXT: pushq %r15
-; SSE4-NEXT: pushq %r14
-; SSE4-NEXT: pushq %r13
-; SSE4-NEXT: pushq %r12
-; SSE4-NEXT: pushq %rbx
-; SSE4-NEXT: pextrq $1, %xmm3, %r14
-; SSE4-NEXT: movq %xmm2, %r13
-; SSE4-NEXT: pextrq $1, %xmm2, %rbp
-; SSE4-NEXT: movq %xmm1, %r12
-; SSE4-NEXT: pextrq $1, %xmm1, %r15
-; SSE4-NEXT: movq %xmm0, %rbx
-; SSE4-NEXT: pextrq $1, %xmm0, %r11
-; SSE4-NEXT: pextrq $1, %xmm7, %r10
-; SSE4-NEXT: movq %xmm6, %r9
-; SSE4-NEXT: pextrq $1, %xmm6, %r8
-; SSE4-NEXT: movq %xmm5, %rdi
-; SSE4-NEXT: pextrq $1, %xmm5, %rsi
-; SSE4-NEXT: movq %xmm4, %rdx
-; SSE4-NEXT: pextrq $1, %xmm4, %rax
-; SSE4-NEXT: xorl %ecx, %ecx
-; SSE4-NEXT: addq %r11, %rax
-; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: setb %cl
-; SSE4-NEXT: xorl %r11d, %r11d
-; SSE4-NEXT: addq %rbx, %rdx
-; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; SSE4-NEXT: setb %r11b
-; SSE4-NEXT: xorl %ebx, %ebx
-; SSE4-NEXT: addq %r15, %rsi
-; SSE4-NEXT: setb %bl
-; SSE4-NEXT: xorl %r15d, %r15d
-; SSE4-NEXT: addq %r12, %rdi
-; SSE4-NEXT: setb %r15b
-; SSE4-NEXT: xorl %r12d, %r12d
-; SSE4-NEXT: addq %rbp, %r8
-; SSE4-NEXT: setb %r12b
-; SSE4-NEXT: xorl %ebp, %ebp
-; SSE4-NEXT: addq %r13, %r9
-; SSE4-NEXT: setb %bpl
-; SSE4-NEXT: xorl %r13d, %r13d
-; SSE4-NEXT: addq %r14, %r10
-; SSE4-NEXT: movq %xmm3, %rdx
-; SSE4-NEXT: setb %r13b
-; SSE4-NEXT: movq %xmm7, %rax
-; SSE4-NEXT: xorl %r14d, %r14d
-; SSE4-NEXT: addq %rdx, %rax
-; SSE4-NEXT: setb %r14b
-; SSE4-NEXT: shldq $63, %rax, %r14
-; SSE4-NEXT: shldq $63, %r10, %r13
-; SSE4-NEXT: shldq $63, %r9, %rbp
-; SSE4-NEXT: shldq $63, %r8, %r12
-; SSE4-NEXT: shldq $63, %rdi, %r15
-; SSE4-NEXT: shldq $63, %rsi, %rbx
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rax, %r11
-; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; SSE4-NEXT: shldq $63, %rax, %rcx
-; SSE4-NEXT: movq %rcx, %xmm4
-; SSE4-NEXT: movq %r11, %xmm0
-; SSE4-NEXT: movq %rbx, %xmm5
-; SSE4-NEXT: movq %r15, %xmm1
-; SSE4-NEXT: movq %r12, %xmm6
-; SSE4-NEXT: movq %rbp, %xmm2
-; SSE4-NEXT: movq %r13, %xmm7
-; SSE4-NEXT: movq %r14, %xmm3
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm6[0]
-; SSE4-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; SSE4-NEXT: popq %rbx
-; SSE4-NEXT: popq %r12
-; SSE4-NEXT: popq %r13
-; SSE4-NEXT: popq %r14
-; SSE4-NEXT: popq %r15
-; SSE4-NEXT: popq %rbp
-; SSE4-NEXT: retq
+; SSE-LABEL: test_ext_v8i64:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm8
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: pxor %xmm4, %xmm0
+; SSE-NEXT: psrlq $1, %xmm0
+; SSE-NEXT: paddq %xmm8, %xmm0
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: pand %xmm5, %xmm4
+; SSE-NEXT: pxor %xmm5, %xmm1
+; SSE-NEXT: psrlq $1, %xmm1
+; SSE-NEXT: paddq %xmm4, %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pand %xmm6, %xmm4
+; SSE-NEXT: pxor %xmm6, %xmm2
+; SSE-NEXT: psrlq $1, %xmm2
+; SSE-NEXT: paddq %xmm4, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: pand %xmm7, %xmm4
+; SSE-NEXT: pxor %xmm7, %xmm3
+; SSE-NEXT: psrlq $1, %xmm3
+; SSE-NEXT: paddq %xmm4, %xmm3
+; SSE-NEXT: retq
;
; AVX1-LABEL: test_ext_v8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: pushq %r15
-; AVX1-NEXT: pushq %r14
-; AVX1-NEXT: pushq %r13
-; AVX1-NEXT: pushq %r12
-; AVX1-NEXT: pushq %rbx
-; AVX1-NEXT: vpextrq $1, %xmm1, %rbx
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vmovq %xmm4, %r15
-; AVX1-NEXT: vpextrq $1, %xmm4, %rbp
-; AVX1-NEXT: vmovq %xmm0, %r13
-; AVX1-NEXT: vpextrq $1, %xmm0, %r12
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r14
-; AVX1-NEXT: vpextrq $1, %xmm0, %r11
-; AVX1-NEXT: vpextrq $1, %xmm3, %r10
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %r9
-; AVX1-NEXT: vpextrq $1, %xmm0, %r8
-; AVX1-NEXT: vmovq %xmm2, %rdi
-; AVX1-NEXT: vpextrq $1, %xmm2, %rsi
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm0
-; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: vpextrq $1, %xmm0, %rax
-; AVX1-NEXT: xorl %ecx, %ecx
-; AVX1-NEXT: addq %r11, %rax
-; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: setb %cl
-; AVX1-NEXT: xorl %r11d, %r11d
-; AVX1-NEXT: addq %r14, %rdx
-; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX1-NEXT: setb %r11b
-; AVX1-NEXT: xorl %r14d, %r14d
-; AVX1-NEXT: addq %r12, %rsi
-; AVX1-NEXT: setb %r14b
-; AVX1-NEXT: xorl %r12d, %r12d
-; AVX1-NEXT: addq %r13, %rdi
-; AVX1-NEXT: setb %r12b
-; AVX1-NEXT: xorl %r13d, %r13d
-; AVX1-NEXT: addq %rbp, %r8
-; AVX1-NEXT: setb %r13b
-; AVX1-NEXT: xorl %ebp, %ebp
-; AVX1-NEXT: addq %r15, %r9
-; AVX1-NEXT: setb %bpl
-; AVX1-NEXT: xorl %r15d, %r15d
-; AVX1-NEXT: addq %rbx, %r10
-; AVX1-NEXT: vmovq %xmm1, %rdx
-; AVX1-NEXT: setb %r15b
-; AVX1-NEXT: vmovq %xmm3, %rax
-; AVX1-NEXT: xorl %ebx, %ebx
-; AVX1-NEXT: addq %rdx, %rax
-; AVX1-NEXT: setb %bl
-; AVX1-NEXT: shldq $63, %rax, %rbx
-; AVX1-NEXT: shldq $63, %r10, %r15
-; AVX1-NEXT: shldq $63, %r9, %rbp
-; AVX1-NEXT: shldq $63, %r8, %r13
-; AVX1-NEXT: shldq $63, %rdi, %r12
-; AVX1-NEXT: shldq $63, %rsi, %r14
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rax, %r11
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX1-NEXT: shldq $63, %rax, %rcx
-; AVX1-NEXT: vmovq %rcx, %xmm0
-; AVX1-NEXT: vmovq %r11, %xmm1
-; AVX1-NEXT: vmovq %r14, %xmm2
-; AVX1-NEXT: vmovq %r12, %xmm3
-; AVX1-NEXT: vmovq %r13, %xmm4
-; AVX1-NEXT: vmovq %rbp, %xmm5
-; AVX1-NEXT: vmovq %r15, %xmm6
-; AVX1-NEXT: vmovq %rbx, %xmm7
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: popq %rbx
-; AVX1-NEXT: popq %r12
-; AVX1-NEXT: popq %r13
-; AVX1-NEXT: popq %r14
-; AVX1-NEXT: popq %r15
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm4
+; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
+; AVX1-NEXT: vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm3, %ymm1, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vxorps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; AVX1-NEXT: vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_ext_v8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: pushq %r15
-; AVX2-NEXT: pushq %r14
-; AVX2-NEXT: pushq %r13
-; AVX2-NEXT: pushq %r12
-; AVX2-NEXT: pushq %rbx
-; AVX2-NEXT: vpextrq $1, %xmm1, %rbx
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vmovq %xmm4, %r15
-; AVX2-NEXT: vpextrq $1, %xmm4, %rbp
-; AVX2-NEXT: vmovq %xmm0, %r13
-; AVX2-NEXT: vpextrq $1, %xmm0, %r12
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r14
-; AVX2-NEXT: vpextrq $1, %xmm0, %r11
-; AVX2-NEXT: vpextrq $1, %xmm3, %r10
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %r9
-; AVX2-NEXT: vpextrq $1, %xmm0, %r8
-; AVX2-NEXT: vmovq %xmm2, %rdi
-; AVX2-NEXT: vpextrq $1, %xmm2, %rsi
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: xorl %ecx, %ecx
-; AVX2-NEXT: addq %r11, %rax
-; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: setb %cl
-; AVX2-NEXT: xorl %r11d, %r11d
-; AVX2-NEXT: addq %r14, %rdx
-; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX2-NEXT: setb %r11b
-; AVX2-NEXT: xorl %r14d, %r14d
-; AVX2-NEXT: addq %r12, %rsi
-; AVX2-NEXT: setb %r14b
-; AVX2-NEXT: xorl %r12d, %r12d
-; AVX2-NEXT: addq %r13, %rdi
-; AVX2-NEXT: setb %r12b
-; AVX2-NEXT: xorl %r13d, %r13d
-; AVX2-NEXT: addq %rbp, %r8
-; AVX2-NEXT: setb %r13b
-; AVX2-NEXT: xorl %ebp, %ebp
-; AVX2-NEXT: addq %r15, %r9
-; AVX2-NEXT: setb %bpl
-; AVX2-NEXT: xorl %r15d, %r15d
-; AVX2-NEXT: addq %rbx, %r10
-; AVX2-NEXT: vmovq %xmm1, %rdx
-; AVX2-NEXT: setb %r15b
-; AVX2-NEXT: vmovq %xmm3, %rax
-; AVX2-NEXT: xorl %ebx, %ebx
-; AVX2-NEXT: addq %rdx, %rax
-; AVX2-NEXT: setb %bl
-; AVX2-NEXT: shldq $63, %rax, %rbx
-; AVX2-NEXT: shldq $63, %r10, %r15
-; AVX2-NEXT: shldq $63, %r9, %rbp
-; AVX2-NEXT: shldq $63, %r8, %r13
-; AVX2-NEXT: shldq $63, %rdi, %r12
-; AVX2-NEXT: shldq $63, %rsi, %r14
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rax, %r11
-; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX2-NEXT: shldq $63, %rax, %rcx
-; AVX2-NEXT: vmovq %rcx, %xmm0
-; AVX2-NEXT: vmovq %r11, %xmm1
-; AVX2-NEXT: vmovq %r14, %xmm2
-; AVX2-NEXT: vmovq %r12, %xmm3
-; AVX2-NEXT: vmovq %r13, %xmm4
-; AVX2-NEXT: vmovq %rbp, %xmm5
-; AVX2-NEXT: vmovq %r15, %xmm6
-; AVX2-NEXT: vmovq %rbx, %xmm7
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2-NEXT: popq %rbx
-; AVX2-NEXT: popq %r12
-; AVX2-NEXT: popq %r13
-; AVX2-NEXT: popq %r14
-; AVX2-NEXT: popq %r15
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpand %ymm3, %ymm1, %ymm2
+; AVX2-NEXT: vpxor %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT: vpaddq %ymm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_ext_v8i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: pushq %r15
-; AVX512-NEXT: pushq %r14
-; AVX512-NEXT: pushq %r13
-; AVX512-NEXT: pushq %r12
-; AVX512-NEXT: pushq %rbx
-; AVX512-NEXT: vpextrq $1, %xmm0, %r10
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %r13
-; AVX512-NEXT: vmovq %xmm2, %r15
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; AVX512-NEXT: vmovq %xmm2, %rbp
-; AVX512-NEXT: vpextrq $1, %xmm2, %r12
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %r14
-; AVX512-NEXT: vpextrq $1, %xmm2, %rbx
-; AVX512-NEXT: vpextrq $1, %xmm1, %r9
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %r8
-; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rcx
-; AVX512-NEXT: vmovq %xmm2, %r11
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT: vmovq %xmm2, %rdi
-; AVX512-NEXT: vpextrq $1, %xmm2, %rax
-; AVX512-NEXT: xorl %esi, %esi
-; AVX512-NEXT: addq %rbx, %rax
-; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: setb %sil
-; AVX512-NEXT: xorl %ebx, %ebx
-; AVX512-NEXT: addq %r14, %rdi
-; AVX512-NEXT: setb %bl
-; AVX512-NEXT: xorl %r14d, %r14d
-; AVX512-NEXT: addq %r12, %rcx
-; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT: setb %r14b
-; AVX512-NEXT: xorl %r12d, %r12d
-; AVX512-NEXT: addq %rbp, %r11
-; AVX512-NEXT: setb %r12b
-; AVX512-NEXT: xorl %ebp, %ebp
-; AVX512-NEXT: addq %r13, %rdx
-; AVX512-NEXT: setb %bpl
-; AVX512-NEXT: xorl %r13d, %r13d
-; AVX512-NEXT: addq %r15, %r8
-; AVX512-NEXT: setb %r13b
-; AVX512-NEXT: xorl %r15d, %r15d
-; AVX512-NEXT: addq %r10, %r9
-; AVX512-NEXT: vmovq %xmm0, %rcx
-; AVX512-NEXT: setb %r15b
-; AVX512-NEXT: vmovq %xmm1, %rax
-; AVX512-NEXT: xorl %r10d, %r10d
-; AVX512-NEXT: addq %rcx, %rax
-; AVX512-NEXT: setb %r10b
-; AVX512-NEXT: shldq $63, %rax, %r10
-; AVX512-NEXT: shldq $63, %r9, %r15
-; AVX512-NEXT: shldq $63, %r8, %r13
-; AVX512-NEXT: shldq $63, %rdx, %rbp
-; AVX512-NEXT: shldq $63, %r11, %r12
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shldq $63, %rax, %r14
-; AVX512-NEXT: shldq $63, %rdi, %rbx
-; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT: shldq $63, %rax, %rsi
-; AVX512-NEXT: vmovq %rsi, %xmm0
-; AVX512-NEXT: vmovq %rbx, %xmm1
-; AVX512-NEXT: vmovq %r14, %xmm2
-; AVX512-NEXT: vmovq %r12, %xmm3
-; AVX512-NEXT: vmovq %rbp, %xmm4
-; AVX512-NEXT: vmovq %r13, %xmm5
-; AVX512-NEXT: vmovq %r15, %xmm6
-; AVX512-NEXT: vmovq %r10, %xmm7
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0]
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm7[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: popq %rbx
-; AVX512-NEXT: popq %r12
-; AVX512-NEXT: popq %r13
-; AVX512-NEXT: popq %r14
-; AVX512-NEXT: popq %r15
-; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpsrlq $1, %zmm0, %zmm0
+; AVX512-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; AVX512-NEXT: retq
%x0 = zext <8 x i64> %a0 to <8 x i128>
%x1 = zext <8 x i64> %a1 to <8 x i128>
@@ -2537,3 +1289,6 @@ define <8 x i64> @test_ext_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
ret <8 x i64> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SSE2: {{.*}}
+; SSE4: {{.*}}
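
For reference, the updated CHECK lines above all encode the fixed-point expansion that TargetLowering::expandAVG emits for AVGFLOORU: avgflooru(a, b) == (a & b) + ((a ^ b) >> 1), computed entirely in the source element type. That is why the old zext-to-wider-type sequences (and the scalarized addq/setb/shldq chains for v8i64, which had to widen to i128) disappear. A minimal IR sketch of that expansion, assuming a hypothetical function name (illustration only, not part of the patch):

define <4 x i32> @avgflooru_sketch(<4 x i32> %a, <4 x i32> %b) {
  %and  = and <4 x i32> %a, %b                                ; bits common to both inputs
  %xor  = xor <4 x i32> %a, %b                                ; bits that differ
  %half = lshr <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>   ; halve the differing bits
  %avg  = add <4 x i32> %and, %half                           ; floor((a + b) / 2), no overflow possible
  ret <4 x i32> %avg
}

On SSE this lowers to exactly the pand/pxor/psrld/paddd sequence in the new SSE check lines, independent of element width.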
diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
index f3a8ca4de9975..ad08ecb35fdef 100644
--- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll
+++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
@@ -48,10 +48,10 @@ define dso_local void @add512(ptr %a, ptr %b, ptr %c) "min-legal-vector-width"="
define dso_local void @avg_v64i8_256(ptr %a, ptr %b) "min-legal-vector-width"="256" {
; CHECK-LABEL: avg_v64i8_256:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa (%rsi), %ymm0
-; CHECK-NEXT: vmovdqa 32(%rsi), %ymm1
-; CHECK-NEXT: vpavgb (%rdi), %ymm0, %ymm0
-; CHECK-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1
+; CHECK-NEXT: vmovdqa (%rdi), %ymm0
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
+; CHECK-NEXT: vpavgb (%rsi), %ymm0, %ymm0
+; CHECK-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1
; CHECK-NEXT: vmovdqu %ymm1, (%rax)
; CHECK-NEXT: vmovdqu %ymm0, (%rax)
; CHECK-NEXT: vzeroupper
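
The only change in this last test is which pointer feeds the vpavgb memory operand: with AVG nodes now formed pre-legalization, the commutative AVGCEILU operands appear to be canonicalized the other way round, so the loads from %rdi and %rsi swap roles; the generated code is otherwise equivalent. For context, a minimal sketch of one shape that now folds to AVGCEILU, using the identity avgceilu(a, b) == (a | b) - ((a ^ b) >> 1) (illustrative only, assuming the function name):

define <16 x i8> @avgceilu_sketch(<16 x i8> %a, <16 x i8> %b) {
  %or   = or <16 x i8> %a, %b                                 ; a | b == (a ^ b) + (a & b)
  %xor  = xor <16 x i8> %a, %b
  %half = lshr <16 x i8> %xor, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %avg  = sub <16 x i8> %or, %half                            ; ceil((a + b) / 2)
  ret <16 x i8> %avg
}

On x86 this should match a single vpavgb per vector register.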