[llvm] r332832 - [X86][SSE] Support v4i32 rotations (PR37426)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon May 21 02:45:59 PDT 2018
Author: rksimon
Date: Mon May 21 02:45:59 2018
New Revision: 332832
URL: http://llvm.org/viewvc/llvm-project?rev=332832&view=rev
Log:
[X86][SSE] Support v4i32 rotations (PR37426)
As suggested by Fabian on PR37426, we can use PMULUDQ to perform v4i32 vector rotations, as the upper 32 bits of each 32x32 multiply contain the 'wrapped' bits of the rotation.
v8i16/v16i8 rotations would be straightforward to add to lowerRotate in the future - ideally we'd share most of the code with the vector shift lowering.
Differential Revision: https://reviews.llvm.org/D46954
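
To make the trick concrete, here is a minimal scalar sketch of what PMULUDQ computes per 32-bit lane (illustration only, not part of the patch; RotlViaMul is a made-up name): multiplying x by 2^r leaves (x << r) in the low 32 bits of the 64-bit product and the wrapped bits (x >> (32 - r)) in the high 32 bits, so OR-ing the two halves yields the rotation.

#include <cassert>
#include <cstdint>

// Illustrative scalar model of the per-lane PMULUDQ-based rotate (not from the
// patch): the low half of the widening multiply is the left-shifted value, the
// high half holds the bits that wrapped around.
static uint32_t RotlViaMul(uint32_t X, unsigned R) {
  assert(R < 32 && "rotation amount out of range");
  uint64_t Product = (uint64_t)X * ((uint64_t)1 << R);
  return (uint32_t)Product | (uint32_t)(Product >> 32);
}

int main() {
  assert(RotlViaMul(0x80000001u, 1) == 0x00000003u);
  assert(RotlViaMul(0xDEADBEEFu, 13) ==
         ((0xDEADBEEFu << 13) | (0xDEADBEEFu >> 19)));
  assert(RotlViaMul(0x12345678u, 0) == 0x12345678u);
  return 0;
}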
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=332832&r1=332831&r2=332832&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon May 21 02:45:59 2018
@@ -899,6 +899,8 @@ X86TargetLowering::X86TargetLowering(con
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
}
+
+ setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
@@ -1030,6 +1032,8 @@ X86TargetLowering::X86TargetLowering(con
setOperationAction(ISD::SRA, VT, Custom);
}
+ setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
+
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
@@ -23247,6 +23251,51 @@ static SDValue LowerScalarVariableShift(
return SDValue();
}
+// Convert a shift/rotate left amount to a multiplication scale factor.
+static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = Amt.getSimpleValueType();
+ bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
+
+ if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
+ (Subtarget.hasInt256() && VT == MVT::v16i16))) {
+ SmallVector<SDValue, 8> Elts;
+ MVT SVT = VT.getVectorElementType();
+ unsigned SVTBits = SVT.getSizeInBits();
+ APInt One(SVTBits, 1);
+ unsigned NumElems = VT.getVectorNumElements();
+
+ for (unsigned i = 0; i != NumElems; ++i) {
+ SDValue Op = Amt->getOperand(i);
+ if (Op->isUndef()) {
+ Elts.push_back(Op);
+ continue;
+ }
+
+ ConstantSDNode *ND = cast<ConstantSDNode>(Op);
+ APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
+ uint64_t ShAmt = C.getZExtValue();
+ if (ShAmt >= SVTBits) {
+ Elts.push_back(DAG.getUNDEF(SVT));
+ continue;
+ }
+ Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
+ }
+ return DAG.getBuildVector(VT, dl, Elts);
+ }
+
+ if (VT == MVT::v4i32) {
+ Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
+ DAG.getConstant(0x3f800000U, dl, VT));
+ Amt = DAG.getBitcast(MVT::v4f32, Amt);
+ return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
+ }
+
+ return SDValue();
+}
+
static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -23308,46 +23357,9 @@ static SDValue LowerShift(SDValue Op, co
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
- // Do this only if the vector shift count is a constant build_vector.
- if (ConstantAmt && Op.getOpcode() == ISD::SHL &&
- (VT == MVT::v8i16 || VT == MVT::v4i32 ||
- (Subtarget.hasInt256() && VT == MVT::v16i16))) {
- SmallVector<SDValue, 8> Elts;
- MVT SVT = VT.getVectorElementType();
- unsigned SVTBits = SVT.getSizeInBits();
- APInt One(SVTBits, 1);
- unsigned NumElems = VT.getVectorNumElements();
-
- for (unsigned i=0; i !=NumElems; ++i) {
- SDValue Op = Amt->getOperand(i);
- if (Op->isUndef()) {
- Elts.push_back(Op);
- continue;
- }
-
- ConstantSDNode *ND = cast<ConstantSDNode>(Op);
- APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
- uint64_t ShAmt = C.getZExtValue();
- if (ShAmt >= SVTBits) {
- Elts.push_back(DAG.getUNDEF(SVT));
- continue;
- }
- Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
- }
- SDValue BV = DAG.getBuildVector(VT, dl, Elts);
- return DAG.getNode(ISD::MUL, dl, VT, R, BV);
- }
-
- // Lower SHL with variable shift amount.
- if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
- Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
-
- Op = DAG.getNode(ISD::ADD, dl, VT, Op,
- DAG.getConstant(0x3f800000U, dl, VT));
- Op = DAG.getBitcast(MVT::v4f32, Op);
- Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
- return DAG.getNode(ISD::MUL, dl, VT, Op, R);
- }
+ if (Op.getOpcode() == ISD::SHL)
+ if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
+ return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
// If possible, lower this shift as a sequence of two shifts by
// constant plus a MOVSS/MOVSD/PBLEND instead of scalarizing it.
@@ -23723,6 +23735,8 @@ static SDValue LowerShift(SDValue Op, co
static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
+ assert(VT.isVector() && "Custom lowering only for vector rotates!");
+
SDLoc DL(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
@@ -23748,31 +23762,86 @@ static SDValue LowerRotate(SDValue Op, c
return Op;
}
- assert(VT.isVector() && "Custom lowering only for vector rotates!");
- assert(Subtarget.hasXOP() && "XOP support required for vector rotates!");
assert((Opcode == ISD::ROTL) && "Only ROTL supported");
// XOP has 128-bit vector variable + immediate rotates.
// +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
+ if (Subtarget.hasXOP()) {
+ // Split 256-bit integers.
+ if (VT.is256BitVector())
+ return Lower256IntArith(Op, DAG);
+ assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
- // Split 256-bit integers.
- if (VT.is256BitVector())
+ // Attempt to rotate by immediate.
+ if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
+ if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
+ uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
+ assert(RotateAmt < EltSizeInBits && "Rotation out of range");
+ return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
+ DAG.getConstant(RotateAmt, DL, MVT::i8));
+ }
+ }
+
+ // Use general rotate by variable (per-element).
+ return Op;
+ }
+
+ // Split 256-bit integers on pre-AVX2 targets.
+ if (VT.is256BitVector() && !Subtarget.hasAVX2())
return Lower256IntArith(Op, DAG);
- assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
+ assert((VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget.hasAVX2())) &&
+ "Only v4i32/v8i32 vector rotates supported");
- // Attempt to rotate by immediate.
+ // Rotate by an uniform constant - expand back to shifts.
+ // TODO - legalizers should be able to handle this.
if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
assert(RotateAmt < EltSizeInBits && "Rotation out of range");
- return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
- DAG.getConstant(RotateAmt, DL, MVT::i8));
+ if (RotateAmt == 0)
+ return R;
+
+ SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, VT, R,
+ RotateAmt, DAG);
+ SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, VT, R,
+ EltSizeInBits - RotateAmt, DAG);
+ return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
}
- // Use general rotate by variable (per-element).
- return Op;
+ // AVX2 - best to fallback to variable shifts.
+ // TODO - legalizers should be able to handle this.
+ if (Subtarget.hasAVX2()) {
+ SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
+ AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
+ SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
+ return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
+ }
+
+ // As with shifts, convert the rotation amount to a multiplication factor,
+ // and make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
+ // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
+ // that can then be OR'd with the lower 32-bits.
+ Amt = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
+
+ static const int OddMask[] = {1, -1, 3, -1};
+ SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
+ SDValue Amt13 = DAG.getVectorShuffle(VT, DL, Amt, Amt, OddMask);
+
+ SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
+ DAG.getBitcast(MVT::v2i64, R),
+ DAG.getBitcast(MVT::v2i64, Amt));
+ SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
+ DAG.getBitcast(MVT::v2i64, R13),
+ DAG.getBitcast(MVT::v2i64, Amt13));
+ Res02 = DAG.getBitcast(VT, Res02);
+ Res13 = DAG.getBitcast(VT, Res13);
+
+ return DAG.getNode(ISD::OR, DL, VT,
+ DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
+ DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
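
The v4i32 path in the new convertShiftLeftToScale() helper reuses the float-exponent trick that the SHL lowering already used: shifting the amount into the exponent field and adding the bit pattern of 1.0f (0x3f800000) produces the float 2^amt, which a truncating float-to-int conversion turns back into the integer scale factor 1 << amt - exactly the pslld $23 / paddd / cvttps2dq sequences visible in the tests below. A minimal scalar model of that conversion (illustration only, not part of the patch; ShiftAmountToScale is a made-up name):

#include <cassert>
#include <cstdint>
#include <cstring>

// Illustrative scalar model (not from the patch) of the per-lane amount ->
// scale-factor conversion: (Amt << 23) + bias-of-1.0f is the IEEE-754 single
// with value 2^Amt, so truncating it back to integer gives 1 << Amt.
// (For Amt == 31 the hardware's signed cvttps2dq result 0x80000000 happens to
// be the desired bit pattern as well.)
static uint32_t ShiftAmountToScale(uint32_t Amt) {
  assert(Amt < 32 && "shift amount out of range");
  uint32_t Bits = (Amt << 23) + 0x3f800000u;
  float F;
  std::memcpy(&F, &Bits, sizeof(F));
  return (uint32_t)F;
}

int main() {
  for (uint32_t Amt = 0; Amt != 32; ++Amt)
    assert(ShiftAmountToScale(Amt) == (1u << Amt));
  return 0;
}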
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll?rev=332832&r1=332831&r2=332832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll Mon May 21 02:45:59 2018
@@ -122,93 +122,61 @@ define <2 x i64> @var_rotate_v2i64(<2 x
define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: var_rotate_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
-; SSE2-NEXT: psubd %xmm1, %xmm2
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld %xmm1, %xmm3
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld %xmm5, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: psrld %xmm2, %xmm0
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
-; SSE2-NEXT: orps %xmm4, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pslld $23, %xmm1
; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrld %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm6
-; SSE41-NEXT: psrld %xmm5, %xmm6
-; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrld %xmm2, %xmm4
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: psrld %xmm2, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: var_rotate_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm4
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v4i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpsrlvd %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
+; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v4i32:
@@ -232,35 +200,20 @@ define <4 x i32> @var_rotate_v4i32(<4 x
;
; X32-SSE-LABEL: var_rotate_v4i32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
-; X32-SSE-NEXT: psubd %xmm1, %xmm2
; X32-SSE-NEXT: pslld $23, %xmm1
; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm3, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld %xmm1, %xmm3
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld %xmm5, %xmm1
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: psrld %xmm2, %xmm0
-; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
-; X32-SSE-NEXT: orps %xmm4, %xmm1
-; X32-SSE-NEXT: movaps %xmm1, %xmm0
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
%b32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %b
%shl = shl <4 x i32> %a, %b
@@ -900,87 +853,64 @@ define <2 x i64> @splatvar_rotate_v2i64(
define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v4i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm2, %xmm2
-; SSE2-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
-; SSE2-NEXT: psubd %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: pslld %xmm2, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld %xmm1, %xmm2
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld %xmm5, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psrld %xmm3, %xmm5
-; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; SSE2-NEXT: psrld %xmm2, %xmm0
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
-; SSE2-NEXT: orps %xmm4, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: pslld $23, %xmm1
+; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: pslld %xmm2, %xmm1
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrld %xmm2, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm5 = xmm2[2,3,3,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm6
-; SSE41-NEXT: psrld %xmm5, %xmm6
-; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4,5,6,7]
-; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: psrld %xmm3, %xmm4
-; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; SSE41-NEXT: psrld %xmm2, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5],xmm6[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: pslld $23, %xmm1
+; SSE41-NEXT: paddd {{.*}}(%rip), %xmm1
+; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_rotate_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm4
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
@@ -1014,30 +944,21 @@ define <4 x i32> @splatvar_rotate_v4i32(
;
; X32-SSE-LABEL: splatvar_rotate_v4i32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps %xmm2, %xmm2
-; X32-SSE-NEXT: movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32]
-; X32-SSE-NEXT: psubd %xmm1, %xmm3
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: pslld %xmm2, %xmm4
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm3[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: psrld %xmm1, %xmm2
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld %xmm5, %xmm1
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,0,1]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[2,3,3,3,4,5,6,7]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: psrld %xmm3, %xmm5
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,1,4,5,6,7]
-; X32-SSE-NEXT: psrld %xmm2, %xmm0
-; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm5[1]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,3]
-; X32-SSE-NEXT: orps %xmm4, %xmm1
-; X32-SSE-NEXT: movaps %xmm1, %xmm0
+; X32-SSE-NEXT: pslld $23, %xmm1
+; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
%splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
@@ -1642,62 +1563,52 @@ define <4 x i32> @constant_rotate_v4i32(
; SSE2-LABEL: constant_rotate_v4i32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pmuludq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $25, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrld $26, %xmm3
-; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrld $27, %xmm1
-; SSE2-NEXT: psrld $28, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
-; SSE41-NEXT: pmulld %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld $25, %xmm2
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psrld $27, %xmm3
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psrld $26, %xmm2
-; SSE41-NEXT: psrld $28, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm3
+; SSE41-NEXT: pmuludq %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT: por %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; AVX1-NEXT: vpsrld $25, %xmm0, %xmm2
-; AVX1-NEXT: vpsrld $27, %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpsrld $26, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [16,32,64,128]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v4i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v4i32:
@@ -1722,25 +1633,17 @@ define <4 x i32> @constant_rotate_v4i32(
; X32-SSE-LABEL: constant_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm2
-; X32-SSE-NEXT: pmuludq %xmm1, %xmm2
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm1, %xmm3
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $25, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm3
-; X32-SSE-NEXT: psrld $26, %xmm3
-; X32-SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm1[1]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $27, %xmm1
-; X32-SSE-NEXT: psrld $28, %xmm0
-; X32-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm3[0,3]
-; X32-SSE-NEXT: orps %xmm2, %xmm0
+; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE-NEXT: por %xmm3, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 5, i32 6, i32 7>
%lshr = lshr <4 x i32> %a, <i32 28, i32 27, i32 26, i32 25>
@@ -2156,16 +2059,16 @@ define <4 x i32> @splatconstant_rotate_v
; SSE-LABEL: splatconstant_rotate_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pslld $4, %xmm1
-; SSE-NEXT: psrld $28, %xmm0
+; SSE-NEXT: psrld $28, %xmm1
+; SSE-NEXT: pslld $4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $28, %xmm0, %xmm1
+; AVX-NEXT: vpslld $4, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
@@ -2189,8 +2092,8 @@ define <4 x i32> @splatconstant_rotate_v
; X32-SSE-LABEL: splatconstant_rotate_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: pslld $4, %xmm1
-; X32-SSE-NEXT: psrld $28, %xmm0
+; X32-SSE-NEXT: psrld $28, %xmm1
+; X32-SSE-NEXT: pslld $4, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
@@ -2343,19 +2246,18 @@ define <4 x i32> @splatconstant_rotate_m
; SSE-LABEL: splatconstant_rotate_mask_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pslld $4, %xmm1
-; SSE-NEXT: psrld $28, %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrld $28, %xmm1
+; SSE-NEXT: pslld $4, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v4i32:
; AVX: # %bb.0:
-; AVX-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrld $28, %xmm0, %xmm1
+; AVX-NEXT: vpslld $4, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
@@ -2381,11 +2283,10 @@ define <4 x i32> @splatconstant_rotate_m
; X32-SSE-LABEL: splatconstant_rotate_mask_v4i32:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: pslld $4, %xmm1
-; X32-SSE-NEXT: psrld $28, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: por %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: psrld $28, %xmm1
+; X32-SSE-NEXT: pslld $4, %xmm0
+; X32-SSE-NEXT: por %xmm1, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
%lshr = lshr <4 x i32> %a, <i32 28, i32 28, i32 28, i32 28>
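
For comparison with the checks above, the new SSE2 sequence corresponds roughly to the following intrinsics sketch (illustration only, not code from the patch; rotl_v4i32 is a made-up name, and the backend's exact shuffle selection differs slightly): build the per-lane scale 2^amt, run PMULUDQ twice to cover the even and odd lanes, then OR the gathered low halves (the left-shifted bits) with the gathered high halves (the wrapped bits).

#include <emmintrin.h>

// Illustrative SSE2 sketch (not from the patch) of a variable v4i32 rotate
// via two PMULUDQ multiplies.
static __m128i rotl_v4i32(__m128i V, __m128i Amt) {
  // Amt -> 2^Amt per lane via the float-exponent trick (pslld/paddd/cvttps2dq).
  __m128i Scale = _mm_cvttps_epi32(_mm_castsi128_ps(
      _mm_add_epi32(_mm_slli_epi32(Amt, 23), _mm_set1_epi32(0x3f800000))));

  // PMULUDQ only multiplies the even lanes, so copy the odd lanes down first.
  __m128i VOdd = _mm_shuffle_epi32(V, _MM_SHUFFLE(3, 3, 1, 1));
  __m128i SOdd = _mm_shuffle_epi32(Scale, _MM_SHUFFLE(3, 3, 1, 1));
  __m128i Even = _mm_mul_epu32(V, Scale);    // 64-bit products of lanes 0 and 2
  __m128i Odd  = _mm_mul_epu32(VOdd, SOdd);  // 64-bit products of lanes 1 and 3

  // Gather the low halves (V << Amt) and high halves (V >> (32 - Amt)) of the
  // products; they come out in lane order {0,2,1,3}, so restore the order.
  __m128i Lo = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(Even), _mm_castsi128_ps(Odd), _MM_SHUFFLE(2, 0, 2, 0)));
  __m128i Hi = _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(Even), _mm_castsi128_ps(Odd), _MM_SHUFFLE(3, 1, 3, 1)));
  Lo = _mm_shuffle_epi32(Lo, _MM_SHUFFLE(3, 1, 2, 0));
  Hi = _mm_shuffle_epi32(Hi, _MM_SHUFFLE(3, 1, 2, 0));
  return _mm_or_si128(Lo, Hi);
}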
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll?rev=332832&r1=332831&r2=332832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll Mon May 21 02:45:59 2018
@@ -88,55 +88,43 @@ define <4 x i64> @var_rotate_v4i64(<4 x
define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: var_rotate_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpsubd %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vpmulld %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpmuludq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; AVX1-NEXT: vpor %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm4, %xmm6, %xmm4
-; AVX1-NEXT: vpsrlq $32, %xmm3, %xmm5
-; AVX1-NEXT: vpsrld %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX1-NEXT: vpsrld %xmm7, %xmm6, %xmm7
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
-; AVX1-NEXT: vpsrld %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
-; AVX1-NEXT: vpsrld %xmm6, %xmm0, %xmm6
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpsrld %xmm5, %xmm0, %xmm5
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32]
-; AVX2-NEXT: vpsubd %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
-; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT: vpsubd %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v8i32:
@@ -523,44 +511,38 @@ define <4 x i64> @splatvar_rotate_v4i64(
define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm1, %xmm3, %xmm4
-; AVX1-NEXT: vpslld %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm8
-; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-NEXT: vpsrld %xmm4, %xmm3, %xmm5
-; AVX1-NEXT: vpsrlq $32, %xmm2, %xmm6
-; AVX1-NEXT: vpsrld %xmm6, %xmm3, %xmm7
-; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-NEXT: vpxor %xmm7, %xmm7, %xmm7
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; AVX1-NEXT: vpsrld %xmm7, %xmm3, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpsrld %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3],xmm1[4,5],xmm5[6,7]
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld %xmm6, %xmm0, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpsrld %xmm7, %xmm0, %xmm4
-; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm3[2,3],xmm5[4,5],xmm3[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpor %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm8, %ymm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd %xmm1, %ymm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
-; AVX2-NEXT: vpsubd %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm1
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32]
+; AVX2-NEXT: vpsubd %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
@@ -913,33 +895,35 @@ define <4 x i64> @constant_rotate_v4i64(
define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: constant_rotate_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm2, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrld $21, %xmm2, %xmm3
-; AVX1-NEXT: vpsrld $23, %xmm2, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpsrld $22, %xmm2, %xmm4
-; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [256,512,1024,2048]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [16,32,64,128]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
-; AVX1-NEXT: vpsrld $25, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld $27, %xmm0, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vpsrld $26, %xmm0, %xmm4
-; AVX1-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,2,2]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
-; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v8i32:
@@ -1233,21 +1217,21 @@ define <4 x i64> @splatconstant_rotate_v
define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpslld $4, %xmm2, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $28, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $28, %xmm1, %xmm2
+; AVX1-NEXT: vpslld $4, %xmm1, %xmm1
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $28, %xmm0, %xmm2
+; AVX1-NEXT: vpslld $4, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpslld $4, %ymm0, %ymm1
-; AVX2-NEXT: vpsrld $28, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpsrld $28, %ymm0, %ymm1
+; AVX2-NEXT: vpslld $4, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v8i32:
@@ -1450,25 +1434,23 @@ define <4 x i64> @splatconstant_rotate_m
define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: splatconstant_rotate_mask_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $4, %xmm0, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpslld $4, %xmm2, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vpsrld $28, %xmm0, %xmm0
-; AVX1-NEXT: vpsrld $28, %xmm2, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrld $28, %xmm1, %xmm2
+; AVX1-NEXT: vpslld $4, %xmm1, %xmm1
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $28, %xmm0, %xmm2
+; AVX1-NEXT: vpslld $4, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpslld $4, %ymm0, %ymm1
-; AVX2-NEXT: vpsrld $28, %ymm0, %ymm0
+; AVX2-NEXT: vpsrld $28, %ymm0, %ymm1
+; AVX2-NEXT: vpslld $4, %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
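
On AVX2 the patch prefers the per-element variable shifts over the PMULUDQ form, as the vpsllvd/vpsrlvd output above shows; an intrinsics sketch of that fallback (illustration only, not code from the patch; rotl_v8i32_avx2 is a made-up name):

#include <immintrin.h>

// Illustrative AVX2 sketch (not from the patch): with per-lane variable shifts
// available, rotl(x, n) is simply (x << n) | (x >> (32 - n)). For n == 0 the
// right-shift count is 32, which vpsrlvd treats as "result is zero", so the
// identity rotation still comes out correct.
static __m256i rotl_v8i32_avx2(__m256i V, __m256i Amt) {
  __m256i AmtR = _mm256_sub_epi32(_mm256_set1_epi32(32), Amt);
  __m256i SHL = _mm256_sllv_epi32(V, Amt);
  __m256i SRL = _mm256_srlv_epi32(V, AmtR);
  return _mm256_or_si256(SHL, SRL);
}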
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll?rev=332832&r1=332831&r2=332832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll Mon May 21 02:45:59 2018
@@ -87,14 +87,13 @@ define <4 x i32> @var_shift_v4i32(<4 x i
; SSE2-NEXT: pslld $23, %xmm1
; SSE2-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_shift_v4i32:
@@ -110,7 +109,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shift_v4i32:
@@ -143,14 +142,13 @@ define <4 x i32> @var_shift_v4i32(<4 x i
; X32-SSE-NEXT: pslld $23, %xmm1
; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
; X32-SSE-NEXT: cvttps2dq %xmm1, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE-NEXT: pmuludq %xmm2, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm1, %xmm0
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X32-SSE-NEXT: pmuludq %xmm2, %xmm1
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X32-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-SSE-NEXT: retl
%shift = shl <4 x i32> %a, %b
ret <4 x i32> %shift
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll?rev=332832&r1=332831&r2=332832&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll Mon May 21 02:45:59 2018
@@ -93,11 +93,11 @@ define <8 x i32> @var_shift_v8i32(<8 x i
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -138,11 +138,11 @@ define <8 x i32> @var_shift_v8i32(<8 x i
; X32-AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; X32-AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; X32-AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpmulld %xmm2, %xmm4, %xmm2
; X32-AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; X32-AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; X32-AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; X32-AVX1-NEXT: vpmulld %xmm0, %xmm1, %xmm0
+; X32-AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;