[llvm] 0caf8a3 - [X86] LowerRotate - enable vXi32 splat handling
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 21 03:19:52 PST 2021
Author: Simon Pilgrim
Date: 2021-12-21T11:19:23Z
New Revision: 0caf8a3daf16bec0014535ddf8b402931a6a6917
URL: https://github.com/llvm/llvm-project/commit/0caf8a3daf16bec0014535ddf8b402931a6a6917
DIFF: https://github.com/llvm/llvm-project/commit/0caf8a3daf16bec0014535ddf8b402931a6a6917.diff
LOG: [X86] LowerRotate - enable vXi32 splat handling
Pull out the "rotl(x,y) --> (unpack(x,x) << zext(splat(y % bw))) >> bw" special case from vXi8 lowering so we can reuse it for vXi32 types as well.
There are still some regressions with vXi16 to handle before this becomes entirely general.
It also allows us to remove the now unnecessary hack for handling amount-modulo before splatting.
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-fshl-128.ll
llvm/test/CodeGen/X86/vector-fshl-256.ll
llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
llvm/test/CodeGen/X86/vector-rotate-128.ll
llvm/test/CodeGen/X86/vector-rotate-256.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e691b23f642e0..004d9887c2aab 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -6820,7 +6820,6 @@ static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
/// Returns a node that packs the LHS + RHS nodes together at half width.
/// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
-/// TODO: Add vXi64 -> vXi32 pack support with vector_shuffle node.
/// TODO: Add subvector splitting if/when we have a need for it.
static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
@@ -6832,9 +6831,24 @@ static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
VT.getSizeInBits() == OpVT.getSizeInBits() &&
(EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
"Unexpected PACK operand types");
- assert((EltSizeInBits == 8 || EltSizeInBits == 16) &&
+ assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
"Unexpected PACK result type");
+ // Rely on vector shuffles for vXi64 -> vXi32 packing.
+ if (EltSizeInBits == 32) {
+ SmallVector<int> PackMask;
+ int Offset = PackHiHalf ? 1 : 0;
+ int NumElts = VT.getVectorNumElements();
+ for (int I = 0; I != NumElts; I += 4) {
+ PackMask.push_back(I + Offset);
+ PackMask.push_back(I + Offset + 2);
+ PackMask.push_back(I + Offset + NumElts);
+ PackMask.push_back(I + Offset + NumElts + 2);
+ }
+ return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
+ DAG.getBitcast(VT, RHS), PackMask);
+ }
+
// See if we already have sufficient leading bits for PACKSS/PACKUS.
if (!PackHiHalf) {
if (UsePackUS &&
@@ -29866,44 +29880,53 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
(VT == MVT::v64i8 && Subtarget.useBWIRegs())) &&
"Only vXi32/vXi16/vXi8 vector rotates supported");
- bool IsSplatAmt = DAG.isSplatValue(Amt);
+ // Check for a hidden ISD::ROTR, splat + vXi8 lowering can handle both, but we
+ // currently hit infinite loops in legalization if we allow ISD::ROTR.
+ // FIXME: Infinite ROTL<->ROTR legalization in TargetLowering::expandROT.
+ SDValue HiddenROTRAmt;
+ if (Amt.getOpcode() == ISD::SUB &&
+ ISD::isBuildVectorAllZeros(Amt.getOperand(0).getNode()))
+ HiddenROTRAmt = Amt.getOperand(1);
+
+ MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
+ MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
+
SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
+ SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT,
+ HiddenROTRAmt ? HiddenROTRAmt : Amt, AmtMask);
+
+ // Attempt to fold as unpack(x,x) << zext(splat(y)):
+ // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
+ // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
+ // TODO: Handle vXi16 cases.
+ if (EltSizeInBits == 8 || EltSizeInBits == 32) {
+ if (SDValue BaseRotAmt = DAG.getSplatValue(AmtMod)) {
+ unsigned ShiftX86Opc = HiddenROTRAmt ? X86ISD::VSRLI : X86ISD::VSHLI;
+ SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
+ SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
+ BaseRotAmt = DAG.getZExtOrTrunc(BaseRotAmt, DL, MVT::i32);
+ Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
+ Subtarget, DAG);
+ Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
+ Subtarget, DAG);
+ return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !HiddenROTRAmt);
+ }
+ }
// v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
// the amount bit.
// TODO: We're doing nothing here that we couldn't do for funnel shifts.
if (EltSizeInBits == 8) {
bool IsConstAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
- // Check for a hidden ISD::ROTR, vXi8 lowering can handle both, but we
- // currently hit infinite loops in legalization if we allow ISD::ROTR.
- // FIXME: Infinite ROTL<->ROTR legalization in TargetLowering::expandROT.
- SDValue HiddenROTRAmt;
- if (Amt.getOpcode() == ISD::SUB &&
- ISD::isBuildVectorAllZeros(Amt.getOperand(0).getNode()))
- HiddenROTRAmt = Amt.getOperand(1);
-
- MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
MVT WideVT =
MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
unsigned ShiftOpc = HiddenROTRAmt ? ISD::SRL : ISD::SHL;
- unsigned ShiftX86Opc = HiddenROTRAmt ? X86ISD::VSRLI : X86ISD::VSHLI;
- SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT,
- HiddenROTRAmt ? HiddenROTRAmt : Amt, AmtMask);
- // Attempt to fold as unpack(x,x) << zext(y):
+ // Attempt to fold as:
// rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
// rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
- if (SDValue BaseRotAmt = DAG.getSplatValue(AmtMod)) {
- BaseRotAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, BaseRotAmt);
- SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
- SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
- Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
- Subtarget, DAG);
- Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
- Subtarget, DAG);
- return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !HiddenROTRAmt);
- } else if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
- supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
+ if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
+ supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
// If we're rotating by constant, just use default promotion.
if (IsConstAmt)
return SDValue();
@@ -29917,8 +29940,12 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
if (!HiddenROTRAmt)
R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
- } else if (IsConstAmt ||
- supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
+ }
+
+ // Attempt to fold as unpack(x,x) << zext(y):
+ // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
+ // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
+ if (IsConstAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
// See if we can perform this by unpacking to lo/hi vXi16.
SDValue Z = DAG.getConstant(0, DL, VT);
SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
@@ -29996,17 +30023,9 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
}
// ISD::ROT* uses modulo rotate amounts.
- if (SDValue BaseRotAmt = DAG.getSplatValue(Amt)) {
- // If the amount is a splat, perform the modulo BEFORE the splat,
- // this helps LowerShiftByScalarVariable to remove the splat later.
- Amt = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, BaseRotAmt);
- Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
- Amt = DAG.getVectorShuffle(VT, DL, Amt, DAG.getUNDEF(VT),
- SmallVector<int>(NumElts, 0));
- } else {
- Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
- }
+ Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
+ bool IsSplatAmt = DAG.isSplatValue(Amt);
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
supportedVectorVarShift(VT, Subtarget, ISD::SRL);
diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index 97a485b04b41a..bc394c072fc34 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -1714,19 +1714,17 @@ define void @sink_splatvar(i32* %p, i32 %shift_amt) {
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: movd %xmm0, %ecx
; SSE2-NEXT: andl $31, %ecx
-; SSE2-NEXT: movl $32, %edx
-; SSE2-NEXT: subl %ecx, %edx
-; SSE2-NEXT: movd %edx, %xmm0
-; SSE2-NEXT: movd %ecx, %xmm1
+; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB8_1: # %loop
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqu 1024(%rdi,%rax), %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: psrld %xmm0, %xmm3
-; SSE2-NEXT: pslld %xmm1, %xmm2
-; SSE2-NEXT: por %xmm3, %xmm2
-; SSE2-NEXT: movdqu %xmm2, 1024(%rdi,%rax)
+; SSE2-NEXT: movdqu 1024(%rdi,%rax), %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; SSE2-NEXT: psllq %xmm0, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: psllq %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; SSE2-NEXT: movups %xmm1, 1024(%rdi,%rax)
; SSE2-NEXT: addq $16, %rax
; SSE2-NEXT: jne .LBB8_1
; SSE2-NEXT: # %bb.2: # %end
@@ -1734,22 +1732,19 @@ define void @sink_splatvar(i32* %p, i32 %shift_amt) {
;
; SSE41-LABEL: sink_splatvar:
; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movd %esi, %xmm1
+; SSE41-NEXT: movd %esi, %xmm0
; SSE41-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm1, %xmm0
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: .p2align 4, 0x90
; SSE41-NEXT: .LBB8_1: # %loop
; SSE41-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE41-NEXT: movdqu 1024(%rdi,%rax), %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrld %xmm0, %xmm3
-; SSE41-NEXT: pslld %xmm1, %xmm2
-; SSE41-NEXT: por %xmm3, %xmm2
-; SSE41-NEXT: movdqu %xmm2, 1024(%rdi,%rax)
+; SSE41-NEXT: movdqu 1024(%rdi,%rax), %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; SSE41-NEXT: psllq %xmm0, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: psllq %xmm0, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; SSE41-NEXT: movups %xmm1, 1024(%rdi,%rax)
; SSE41-NEXT: addq $16, %rax
; SSE41-NEXT: jne .LBB8_1
; SSE41-NEXT: # %bb.2: # %end
@@ -1759,19 +1754,17 @@ define void @sink_splatvar(i32* %p, i32 %shift_amt) {
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: .p2align 4, 0x90
; AVX1-NEXT: .LBB8_1: # %loop
; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX1-NEXT: vmovdqu 1024(%rdi,%rax), %xmm2
-; AVX1-NEXT: vpsrld %xmm0, %xmm2, %xmm3
-; AVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqu %xmm2, 1024(%rdi,%rax)
+; AVX1-NEXT: vmovdqu 1024(%rdi,%rax), %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; AVX1-NEXT: vmovups %xmm1, 1024(%rdi,%rax)
; AVX1-NEXT: addq $16, %rax
; AVX1-NEXT: jne .LBB8_1
; AVX1-NEXT: # %bb.2: # %end
@@ -1935,20 +1928,18 @@ define void @sink_splatvar(i32* %p, i32 %shift_amt) {
; X86-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE2-NEXT: movd %xmm0, %edx
; X86-SSE2-NEXT: andl $31, %edx
-; X86-SSE2-NEXT: movl $32, %esi
-; X86-SSE2-NEXT: subl %edx, %esi
-; X86-SSE2-NEXT: movd %esi, %xmm0
-; X86-SSE2-NEXT: movd %edx, %xmm1
+; X86-SSE2-NEXT: movd %edx, %xmm0
; X86-SSE2-NEXT: xorl %edx, %edx
; X86-SSE2-NEXT: .p2align 4, 0x90
; X86-SSE2-NEXT: .LBB8_1: # %loop
; X86-SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-SSE2-NEXT: movdqu (%eax,%ecx,4), %xmm2
-; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
-; X86-SSE2-NEXT: psrld %xmm0, %xmm3
-; X86-SSE2-NEXT: pslld %xmm1, %xmm2
-; X86-SSE2-NEXT: por %xmm3, %xmm2
-; X86-SSE2-NEXT: movdqu %xmm2, (%eax,%ecx,4)
+; X86-SSE2-NEXT: movdqu (%eax,%ecx,4), %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; X86-SSE2-NEXT: psllq %xmm0, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X86-SSE2-NEXT: psllq %xmm0, %xmm1
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; X86-SSE2-NEXT: movups %xmm1, (%eax,%ecx,4)
; X86-SSE2-NEXT: addl $4, %ecx
; X86-SSE2-NEXT: adcl $0, %edx
; X86-SSE2-NEXT: movl %ecx, %esi
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 5f9357a33932b..bb60370c69262 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1423,44 +1423,42 @@ define void @fancierRotate2(i32* %arr, i8* %control, i32 %rot0, i32 %rot1) {
; AVX1-LABEL: fancierRotate2:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovd %edx, %xmm1
-; AVX1-NEXT: vmovd %ecx, %xmm3
+; AVX1-NEXT: vmovd %ecx, %xmm2
; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [31,31,31,31]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm11 = xmm4[0],zero,xmm4[1],zero
-; AVX1-NEXT: vpsubd %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [31,31]
+; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: .p2align 4, 0x90
; AVX1-NEXT: .LBB8_1: # %loop
; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
-; AVX1-NEXT: vmovq {{.*#+}} xmm5 = mem[0],zero
-; AVX1-NEXT: vpcmpeqb %xmm5, %xmm8, %xmm5
-; AVX1-NEXT: vpmovsxbd %xmm5, %xmm6
-; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
-; AVX1-NEXT: vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT: vmovdqu 4096(%rdi,%rax,4), %xmm7
-; AVX1-NEXT: vmovdqu 4112(%rdi,%rax,4), %xmm0
-; AVX1-NEXT: vpslld %xmm9, %xmm7, %xmm1
-; AVX1-NEXT: vpsrld %xmm10, %xmm7, %xmm2
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpslld %xmm9, %xmm0, %xmm2
-; AVX1-NEXT: vpsrld %xmm10, %xmm0, %xmm3
-; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpslld %xmm11, %xmm7, %xmm3
-; AVX1-NEXT: vpsrld %xmm4, %xmm7, %xmm7
-; AVX1-NEXT: vpor %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vblendvps %xmm6, %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpslld %xmm11, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vblendvps %xmm5, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovups %xmm1, 4096(%rdi,%rax,4)
+; AVX1-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
+; AVX1-NEXT: vpcmpeqb %xmm3, %xmm8, %xmm3
+; AVX1-NEXT: vpmovsxbd %xmm3, %xmm10
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT: vpmovsxbd %xmm3, %xmm9
+; AVX1-NEXT: vmovdqu 4096(%rdi,%rax,4), %xmm5
+; AVX1-NEXT: vmovdqu 4112(%rdi,%rax,4), %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm7, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm5, %xmm3
+; AVX1-NEXT: vshufps {{.*#+}} xmm11 = xmm3[1,3],xmm0[1,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm6[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm6, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
+; AVX1-NEXT: vpsllq %xmm2, %xmm7, %xmm4
+; AVX1-NEXT: vpsllq %xmm2, %xmm5, %xmm5
+; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm5[1,3],xmm4[1,3]
+; AVX1-NEXT: vblendvps %xmm10, %xmm11, %xmm4, %xmm4
+; AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpsllq %xmm2, %xmm6, %xmm5
+; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm5[1,3],xmm3[1,3]
+; AVX1-NEXT: vblendvps %xmm9, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vmovups %xmm4, 4096(%rdi,%rax,4)
; AVX1-NEXT: vmovups %xmm0, 4112(%rdi,%rax,4)
; AVX1-NEXT: addq $8, %rax
; AVX1-NEXT: jne .LBB8_1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
index c0b7423051a68..66a318610af53 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -827,55 +827,35 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v4i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: andl $31, %eax
; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pslld %xmm1, %xmm2
-; SSE2-NEXT: movl $32, %ecx
-; SSE2-NEXT: subl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: psrld %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psllq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: psllq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_funnnel_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pslld %xmm2, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm1, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: psrld %xmm1, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE41-NEXT: psllq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: psllq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: splatvar_funnnel_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_funnnel_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: splatvar_funnnel_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; AVX-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v4i32:
; AVX512F: # %bb.0:
@@ -936,16 +916,14 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; X86-SSE2-NEXT: movd %xmm1, %eax
; X86-SSE2-NEXT: andl $31, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT: pslld %xmm1, %xmm2
-; X86-SSE2-NEXT: movl $32, %ecx
-; X86-SSE2-NEXT: subl %eax, %ecx
-; X86-SSE2-NEXT: movd %ecx, %xmm1
-; X86-SSE2-NEXT: psrld %xmm1, %xmm0
-; X86-SSE2-NEXT: por %xmm2, %xmm0
+; X86-SSE2-NEXT: psllq %xmm1, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: psllq %xmm1, %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; X86-SSE2-NEXT: retl
%splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
%res = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %splat)
@@ -956,12 +934,12 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
; SSE2-LABEL: splatvar_funnnel_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psllw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm1, %xmm2
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psllw %xmm1, %xmm3
; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: psrlw %xmm2, %xmm0
@@ -1072,12 +1050,12 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
; X86-SSE2-LABEL: splatvar_funnnel_v8i16:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
+; X86-SSE2-NEXT: pand %xmm1, %xmm2
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
+; X86-SSE2-NEXT: psllw %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
-; X86-SSE2-NEXT: psllw %xmm1, %xmm3
; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psrlw %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index 5550bbcd83b2a..75693cccd8bce 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -662,32 +662,31 @@ define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind
define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-LABEL: splatvar_funnnel_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm5, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpslld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpsllq %xmm1, %ymm2, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
+; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v8i32:
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index 3d385d3cf7f83..4425d4e1ccc66 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -162,71 +162,44 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE2-NEXT: pslld $23, %xmm1
-; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: andl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psllq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: psllq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; SSE41-NEXT: pslld $23, %xmm1
-; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pmuludq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE41-NEXT: psllq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: psllq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v2i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX2-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v2i32:
@@ -288,22 +261,14 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE2-NEXT: por %xmm3, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; X86-SSE2-NEXT: movd %xmm1, %eax
+; X86-SSE2-NEXT: andl $31, %eax
+; X86-SSE2-NEXT: movd %eax, %xmm1
+; X86-SSE2-NEXT: psllq %xmm1, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: psllq %xmm1, %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; X86-SSE2-NEXT: retl
%splat = shufflevector <2 x i32> %amt, <2 x i32> undef, <2 x i32> zeroinitializer
%res = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> %x, <2 x i32> %x, <2 x i32> %splat)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index 2762549b27f66..477f259021891 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -865,62 +865,35 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v4i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: negl %eax
; SSE2-NEXT: andl $31, %eax
; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pslld %xmm1, %xmm2
-; SSE2-NEXT: movl $32, %ecx
-; SSE2-NEXT: subl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: psrld %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psrlq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: psrlq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_funnnel_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: psubd %xmm1, %xmm2
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pslld %xmm1, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm2, %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: psrld %xmm1, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE41-NEXT: psrlq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: psrlq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: splatvar_funnnel_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_funnnel_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: splatvar_funnnel_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; AVX-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v4i32:
; AVX512F: # %bb.0:
@@ -985,17 +958,14 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: negl %eax
; X86-SSE2-NEXT: andl $31, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT: pslld %xmm1, %xmm2
-; X86-SSE2-NEXT: movl $32, %ecx
-; X86-SSE2-NEXT: subl %eax, %ecx
-; X86-SSE2-NEXT: movd %ecx, %xmm1
-; X86-SSE2-NEXT: psrld %xmm1, %xmm0
-; X86-SSE2-NEXT: por %xmm2, %xmm0
+; X86-SSE2-NEXT: psrlq %xmm1, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: psrlq %xmm1, %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; X86-SSE2-NEXT: retl
%splat = shufflevector <4 x i32> %amt, <4 x i32> undef, <4 x i32> zeroinitializer
%res = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %splat)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index 2b502631a251a..c760469b20b45 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -703,36 +703,31 @@ define <4 x i64> @splatvar_funnnel_v4i64(<4 x i64> %x, <4 x i64> %amt) nounwind
define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind {
; AVX1-LABEL: splatvar_funnnel_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpslld %xmm2, %xmm3, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm5, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
+; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v8i32:
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index b4758b35574a4..88975d76af982 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -174,80 +174,44 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: psubd %xmm1, %xmm2
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
-; SSE2-NEXT: pslld $23, %xmm1
-; SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: andl $31, %eax
+; SSE2-NEXT: movd %eax, %xmm1
+; SSE2-NEXT: psrlq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: psrlq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_funnnel_v2i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: psubd %xmm1, %xmm2
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
-; SSE41-NEXT: pslld $23, %xmm1
-; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: cvttps2dq %xmm1, %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: pmuludq %xmm2, %xmm3
-; SSE41-NEXT: pmuludq %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,0,2,2]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE41-NEXT: por %xmm1, %xmm0
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE41-NEXT: psrlq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: psrlq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_funnnel_v2i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,2,2]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v2i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX2-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX2-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_funnnel_v2i32:
@@ -313,24 +277,14 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pxor %xmm2, %xmm2
-; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,0,0]
-; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE2-NEXT: por %xmm3, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; X86-SSE2-NEXT: movd %xmm1, %eax
+; X86-SSE2-NEXT: andl $31, %eax
+; X86-SSE2-NEXT: movd %eax, %xmm1
+; X86-SSE2-NEXT: psrlq %xmm1, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: psrlq %xmm1, %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; X86-SSE2-NEXT: retl
%splat = shufflevector <2 x i32> %amt, <2 x i32> undef, <2 x i32> zeroinitializer
%res = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %x, <2 x i32> %x, <2 x i32> %splat)
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index 572a007de6446..7cf3c48908ed5 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -809,55 +809,35 @@ define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v4i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: andl $31, %eax
; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: pslld %xmm1, %xmm2
-; SSE2-NEXT: movl $32, %ecx
-; SSE2-NEXT: subl %eax, %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: psrld %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: psllq %xmm1, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: psllq %xmm1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v4i32:
; SSE41: # %bb.0:
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: pslld %xmm2, %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32]
-; SSE41-NEXT: psubd %xmm1, %xmm2
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: psrld %xmm1, %xmm0
-; SSE41-NEXT: por %xmm3, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE41-NEXT: psllq %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: psllq %xmm1, %xmm0
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: splatvar_rotate_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: splatvar_rotate_v4i32:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: retq
+; AVX-LABEL: splatvar_rotate_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; AVX-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v4i32:
; AVX512F: # %bb.0:
@@ -918,16 +898,14 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
; X86-SSE2-NEXT: movd %xmm1, %eax
; X86-SSE2-NEXT: andl $31, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT: pslld %xmm1, %xmm2
-; X86-SSE2-NEXT: movl $32, %ecx
-; X86-SSE2-NEXT: subl %eax, %ecx
-; X86-SSE2-NEXT: movd %ecx, %xmm1
-; X86-SSE2-NEXT: psrld %xmm1, %xmm0
-; X86-SSE2-NEXT: por %xmm2, %xmm0
+; X86-SSE2-NEXT: psllq %xmm1, %xmm2
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-SSE2-NEXT: psllq %xmm1, %xmm0
+; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; X86-SSE2-NEXT: retl
%splat = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
%splat32 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %splat
@@ -941,12 +919,12 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v8i16:
; SSE2: # %bb.0:
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psllw %xmm2, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; SSE2-NEXT: psubw %xmm1, %xmm2
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psllw %xmm1, %xmm3
; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: psrlw %xmm2, %xmm0
@@ -1057,12 +1035,12 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; X86-SSE2-LABEL: splatvar_rotate_v8i16:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
+; X86-SSE2-NEXT: pand %xmm1, %xmm2
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
+; X86-SSE2-NEXT: psllw %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
-; X86-SSE2-NEXT: psllw %xmm1, %xmm3
; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1]
; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psrlw %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 2e2861e30cc0b..b58338201e032 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -652,32 +652,31 @@ define <4 x i64> @splatvar_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX1-LABEL: splatvar_rotate_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpslld %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [32,32,32,32]
-; AVX1-NEXT: vpsubd %xmm1, %xmm5, %xmm1
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpslld %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,2,3,3]
+; AVX1-NEXT: vpsllq %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm3[1,3],ymm0[5,7],ymm3[5,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpslld %xmm2, %ymm0, %ymm2
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [32,32,32,32]
-; AVX2-NEXT: vpsubd %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpsllq %xmm1, %ymm2, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
+; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v8i32:
More information about the llvm-commits
mailing list