[llvm] 0fa258c - [X86] Implement certain 16-bit vector shifts via 32-bit shifts
David Majnemer via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 19 11:02:04 PDT 2024
Author: David Majnemer
Date: 2024-09-19T18:01:56Z
New Revision: 0fa258c8d93c2f8de66518868a8e2a645b90afbe
URL: https://github.com/llvm/llvm-project/commit/0fa258c8d93c2f8de66518868a8e2a645b90afbe
DIFF: https://github.com/llvm/llvm-project/commit/0fa258c8d93c2f8de66518868a8e2a645b90afbe.diff
LOG: [X86] Implement certain 16-bit vector shifts via 32-bit shifts
x86 vector ISAs are non-orthogonal in a number of ways. For example,
AVX2 has vpsravd but it does not have vpsravw. However, we can simulate
it via vpsrlvd and some SWAR-style masking.
Another example is 8-bit shifts: we can use vpsllvd to simulate the
missing "vpsllvb" when the shift amounts are shared within each wider
lane.
Existing code generation uses a variety of techniques, including
vpmulhuw, which has higher latency and often more rigid port
requirements than simple bitwise operations.
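To make the SWAR idea concrete, here is a minimal scalar sketch of one
32-bit lane holding two i16 elements that share a shift amount. The
function name and test values are illustrative only and are not taken
from the patch:

#include <cassert>
#include <cstdint>

// Scalar model of the SWAR trick on one 32-bit lane that holds two i16
// elements sharing the same shift amount: a single 32-bit logical right
// shift moves both halves at once, and a mask drops the bits that
// crossed from the high element into the low element.
static uint32_t lshr_two_i16_via_i32(uint32_t Lane, unsigned Amt) {
  assert(Amt < 16 && "per-element shift amount");
  uint32_t Shifted = Lane >> Amt;
  // Per-element mask: all-ones (0xFFFF) shifted the same way, then
  // replicated into both halves. The lowering builds the equivalent
  // mask with a splat plus the same logical shift and lets it
  // constant fold.
  uint32_t ElemMask = 0xFFFFu >> Amt;
  uint32_t Mask = ElemMask | (ElemMask << 16);
  return Shifted & Mask;
}

int main() {
  uint32_t Lane = 0x8001F00Fu; // high i16 = 0x8001, low i16 = 0xF00F
  uint32_t R = lshr_two_i16_via_i32(Lane, 4);
  assert((R & 0xFFFFu) == (0xF00Fu >> 4)); // low element shifted on its own
  assert((R >> 16) == (0x8001u >> 4));     // high element shifted on its own
  return 0;
}

If the two halves needed different amounts, a single wide shift could
not model them, which is why the lowering below only fires when
adjacent elements share a shift amount.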
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
llvm/test/CodeGen/X86/vector-shift-shl-128.ll
llvm/test/CodeGen/X86/vector-shift-shl-256.ll
llvm/test/CodeGen/X86/vector-shift-shl-512.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c2bce6f01ef8f4..9637e96c21cf52 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -95,6 +95,11 @@ static cl::opt<int> BrMergingCcmpBias(
"supports conditional compare instructions."),
cl::Hidden);
+static cl::opt<bool>
+ WidenShift("x86-widen-shift", cl::init(true),
+ cl::desc("Replacte narrow shifts with wider shifts."),
+ cl::Hidden);
+
static cl::opt<int> BrMergingLikelyBias(
"x86-br-merging-likely-bias", cl::init(0),
cl::desc("Increases 'x86-br-merging-base-cost' in cases that it is likely "
@@ -29851,104 +29856,128 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
}
}
- // Constant ISD::SRA/SRL/SHL can be performed efficiently on vXi8 vectors by
- // using vXi16 vector operations.
+ // Constant ISD::SRA/SRL/SHL can be performed efficiently on vXiN vectors by
+ // using vYiM vector operations where X*N == Y*M and M > N.
if (ConstantAmt &&
- (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
- (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
+ (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
+ VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16) &&
!Subtarget.hasXOP()) {
+ MVT NarrowScalarVT = VT.getScalarType();
int NumElts = VT.getVectorNumElements();
- MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
- // We can do this extra fast if each pair of i8 elements is shifted by the
- // same amount by doing this SWAR style: use a shift to move the valid bits
- // to the right position, mask out any bits which crossed from one element
- // to the other.
- APInt UndefElts;
- SmallVector<APInt, 64> AmtBits;
+ // We can do this extra fast if each pair of narrow elements is shifted by
+ // the same amount by doing this SWAR style: use a shift to move the valid
+ // bits to the right position, mask out any bits which crossed from one
+ // element to the other.
// This optimized lowering is only valid if the elements in a pair can
// be treated identically.
- bool SameShifts = true;
- SmallVector<APInt, 32> AmtBits16(NumElts / 2);
- APInt UndefElts16 = APInt::getZero(AmtBits16.size());
- if (getTargetConstantBitsFromNode(Amt, /*EltSizeInBits=*/8, UndefElts,
- AmtBits, /*AllowWholeUndefs=*/true,
- /*AllowPartialUndefs=*/false)) {
- // Collect information to construct the BUILD_VECTOR for the i16 version
- // of the shift. Conceptually, this is equivalent to:
- // 1. Making sure the shift amounts are the same for both the low i8 and
- // high i8 corresponding to the i16 lane.
- // 2. Extending that shift amount to i16 for a build vector operation.
- //
- // We want to handle undef shift amounts which requires a little more
- // logic (e.g. if one is undef and the other is not, grab the other shift
- // amount).
- for (unsigned SrcI = 0, E = AmtBits.size(); SrcI != E; SrcI += 2) {
+ SmallVector<SDValue, 32> AmtWideElts;
+ AmtWideElts.reserve(NumElts);
+ for (int I = 0; I != NumElts; ++I) {
+ AmtWideElts.push_back(Amt.getOperand(I));
+ }
+ SmallVector<SDValue, 32> TmpAmtWideElts;
+ int WideEltSizeInBits = EltSizeInBits;
+ while (WideEltSizeInBits < 32) {
+ // AVX1 does not have psrlvd, etc. which makes interesting 32-bit shifts
+ // unprofitable.
+ if (WideEltSizeInBits >= 16 && !Subtarget.hasAVX2()) {
+ break;
+ }
+ TmpAmtWideElts.resize(AmtWideElts.size() / 2);
+ bool SameShifts = true;
+ for (unsigned SrcI = 0, E = AmtWideElts.size(); SrcI != E; SrcI += 2) {
unsigned DstI = SrcI / 2;
// Both elements are undef? Make a note and keep going.
- if (UndefElts[SrcI] && UndefElts[SrcI + 1]) {
- AmtBits16[DstI] = APInt::getZero(16);
- UndefElts16.setBit(DstI);
+ if (AmtWideElts[SrcI].isUndef() && AmtWideElts[SrcI + 1].isUndef()) {
+ TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
continue;
}
// Even element is undef? We will shift it by the same shift amount as
// the odd element.
- if (UndefElts[SrcI]) {
- AmtBits16[DstI] = AmtBits[SrcI + 1].zext(16);
+ if (AmtWideElts[SrcI].isUndef()) {
+ TmpAmtWideElts[DstI] = AmtWideElts[SrcI + 1];
continue;
}
// Odd element is undef? We will shift it by the same shift amount as
// the even element.
- if (UndefElts[SrcI + 1]) {
- AmtBits16[DstI] = AmtBits[SrcI].zext(16);
+ if (AmtWideElts[SrcI + 1].isUndef()) {
+ TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
continue;
}
// Both elements are equal.
- if (AmtBits[SrcI] == AmtBits[SrcI + 1]) {
- AmtBits16[DstI] = AmtBits[SrcI].zext(16);
+ if (AmtWideElts[SrcI].getNode()->getAsAPIntVal() ==
+ AmtWideElts[SrcI + 1].getNode()->getAsAPIntVal()) {
+ TmpAmtWideElts[DstI] = AmtWideElts[SrcI];
continue;
}
- // One of the provisional i16 elements will not have the same shift
+ // One of the provisional wide elements will not have the same shift
// amount. Let's bail.
SameShifts = false;
break;
}
+ if (!SameShifts) {
+ break;
+ }
+ WideEltSizeInBits *= 2;
+ std::swap(TmpAmtWideElts, AmtWideElts);
}
+ APInt APIntShiftAmt;
+ bool IsConstantSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
+ bool Profitable = WidenShift;
+ // AVX512BW brings support for vpsllvw.
+ if (WideEltSizeInBits * AmtWideElts.size() >= 512 &&
+ WideEltSizeInBits < 32 && !Subtarget.hasBWI()) {
+ Profitable = false;
+ }
+ // Leave AVX512 uniform arithmetic shifts alone, they can be implemented
+ // fairly cheaply in other ways.
+ if (WideEltSizeInBits * AmtWideElts.size() >= 512 && IsConstantSplat) {
+ Profitable = false;
+ }
+ // Leave it up to GFNI if we have it around.
+ // TODO: gf2p8affine is usually higher latency and more port restricted. It
+ // is probably a win to use other strategies in some cases.
+ if (EltSizeInBits == 8 && Subtarget.hasGFNI()) {
+ Profitable = false;
+ }
+
+ // AVX1 does not have vpand which makes our masking impractical. It does
+ // have vandps but that is an FP instruction and crossing FP<->int typically
+ // has some cost.
+ if (WideEltSizeInBits * AmtWideElts.size() >= 256 &&
+ (WideEltSizeInBits < 32 || IsConstantSplat) && !Subtarget.hasAVX2()) {
+ Profitable = false;
+ }
+ int WideNumElts = AmtWideElts.size();
// We are only dealing with identical pairs.
- if (SameShifts) {
- // Cast the operand to vXi16.
- SDValue R16 = DAG.getBitcast(VT16, R);
+ if (Profitable && WideNumElts != NumElts) {
+ MVT WideScalarVT = MVT::getIntegerVT(WideEltSizeInBits);
+ MVT WideVT = MVT::getVectorVT(WideScalarVT, WideNumElts);
+ // Cast the operand to vXiM.
+ SDValue RWide = DAG.getBitcast(WideVT, R);
// Create our new vector of shift amounts.
- SDValue Amt16 = getConstVector(AmtBits16, UndefElts16, VT16, DAG, dl);
+ SDValue AmtWide = DAG.getBuildVector(
+ MVT::getVectorVT(NarrowScalarVT, WideNumElts), dl, AmtWideElts);
+ AmtWide = DAG.getZExtOrTrunc(AmtWide, dl, WideVT);
// Perform the actual shift.
unsigned LogicalOpc = Opc == ISD::SRA ? ISD::SRL : Opc;
- SDValue ShiftedR = DAG.getNode(LogicalOpc, dl, VT16, R16, Amt16);
+ SDValue ShiftedR = DAG.getNode(LogicalOpc, dl, WideVT, RWide, AmtWide);
// Now we need to construct a mask which will "drop" bits that get
// shifted past the LSB/MSB. For a logical shift left, it will look
// like:
- // MaskLowBits = (0xff << Amt16) & 0xff;
- // MaskHighBits = MaskLowBits << 8;
- // Mask = MaskLowBits | MaskHighBits;
+ // FullMask = (1 << EltSizeInBits) - 1
+ // Mask = FullMask << Amt
//
- // This masking ensures that bits cannot migrate from one i8 to
+ // This masking ensures that bits cannot migrate from one narrow lane to
// another. The construction of this mask will be constant folded.
// The mask for a logical right shift is nearly identical, the only
- // difference is that 0xff is shifted right instead of left.
- SDValue Cst255 = DAG.getConstant(0xff, dl, MVT::i16);
- SDValue Splat255 = DAG.getSplat(VT16, dl, Cst255);
- // The mask for the low bits is most simply expressed as an 8-bit
- // field of all ones which is shifted in the exact same way the data
- // is shifted but masked with 0xff.
- SDValue MaskLowBits = DAG.getNode(LogicalOpc, dl, VT16, Splat255, Amt16);
- MaskLowBits = DAG.getNode(ISD::AND, dl, VT16, MaskLowBits, Splat255);
- SDValue Cst8 = DAG.getConstant(8, dl, MVT::i16);
- SDValue Splat8 = DAG.getSplat(VT16, dl, Cst8);
- // The mask for the high bits is the same as the mask for the low bits but
- // shifted up by 8.
- SDValue MaskHighBits =
- DAG.getNode(ISD::SHL, dl, VT16, MaskLowBits, Splat8);
- SDValue Mask = DAG.getNode(ISD::OR, dl, VT16, MaskLowBits, MaskHighBits);
+ // difference is that the all ones mask is shifted right instead of left.
+ SDValue CstFullMask = DAG.getAllOnesConstant(dl, NarrowScalarVT);
+ SDValue SplatFullMask = DAG.getSplat(VT, dl, CstFullMask);
+ SDValue Mask = DAG.getNode(LogicalOpc, dl, VT, SplatFullMask, Amt);
+ Mask = DAG.getBitcast(WideVT, Mask);
// Finally, we mask the shifted vector with the SWAR mask.
- SDValue Masked = DAG.getNode(ISD::AND, dl, VT16, ShiftedR, Mask);
+ SDValue Masked = DAG.getNode(ISD::AND, dl, WideVT, ShiftedR, Mask);
Masked = DAG.getBitcast(VT, Masked);
if (Opc != ISD::SRA) {
// Logical shifts are complete at this point.
@@ -29956,14 +29985,14 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
}
// At this point, we have done a *logical* shift right. We now need to
// sign extend the result so that we get behavior equivalent to an
- // arithmetic shift right. Post-shifting by Amt16, our i8 elements are
- // `8-Amt16` bits wide.
+ // arithmetic shift right. Post-shifting by AmtWide, our narrow elements
+ // are `EltSizeInBits-AmtWide` bits wide.
//
- // To convert our `8-Amt16` bit unsigned numbers to 8-bit signed numbers,
- // we need to replicate the bit at position `7-Amt16` into the MSBs of
- // each i8.
- // We can use the following trick to accomplish this:
- // SignBitMask = 1 << (7-Amt16)
+ // To convert our `EltSizeInBits-AmtWide` bit unsigned numbers to signed
+ // numbers as wide as `EltSizeInBits`, we need to replicate the bit at
+ // position `EltSizeInBits-AmtWide` into the MSBs of each narrow lane. We
+ // can use the following trick to accomplish this:
+ // SignBitMask = 1 << (EltSizeInBits-AmtWide-1)
// (Masked ^ SignBitMask) - SignBitMask
//
// When the sign bit is already clear, this will compute:
@@ -29977,7 +30006,8 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
//
// This is equal to Masked - 2*SignBitMask which will correctly sign
// extend our result.
- SDValue CstHighBit = DAG.getConstant(0x80, dl, MVT::i8);
+ SDValue CstHighBit =
+ DAG.getConstant(1 << (EltSizeInBits - 1), dl, NarrowScalarVT);
SDValue SplatHighBit = DAG.getSplat(VT, dl, CstHighBit);
// This does not induce recursion, all operands are constants.
SDValue SignBitMask = DAG.getNode(LogicalOpc, dl, VT, SplatHighBit, Amt);
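The arithmetic-shift fixup in the hunk above relies on the identity
(x ^ m) - m, where m has only the shifted sign-bit position set. Here
is a hedged scalar illustration for a single i16 element; the helper
name and test values are mine, not from the patch:

#include <cassert>
#include <cstdint>

// After a *logical* right shift by Amt, a 16-bit element holds a
// (16 - Amt)-bit unsigned value. XOR-then-subtract with the shifted
// sign-bit mask replicates that value's top bit into the upper bits,
// which is exactly what an arithmetic shift would have produced.
static uint16_t sra_from_srl_i16(uint16_t X, unsigned Amt) {
  assert(Amt < 16);
  uint16_t Logical = uint16_t(X >> Amt);
  uint16_t SignBitMask = uint16_t(0x8000u >> Amt); // 1 << (15 - Amt)
  return uint16_t((Logical ^ SignBitMask) - SignBitMask);
}

int main() {
  // Negative input: 0xF00F is -4081 as i16; an arithmetic shift right
  // by 3 gives -511, i.e. 0xFE01.
  assert(sra_from_srl_i16(0xF00F, 3) == 0xFE01);
  // Non-negative input: the XOR sets the bit and the subtraction
  // clears it again, so the result matches the plain logical shift.
  assert(sra_from_srl_i16(0x7F00, 3) == (0x7F00 >> 3));
  return 0;
}

As the comments in the patch note, the same per-lane construction is
applied inside the widened vector, with the sign-bit mask shifted by
the original narrow shift amounts so that it constant folds.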
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 1ddc6dbe82423b..4d4739232e7a6f 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -1492,6 +1492,88 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
ret <8 x i16> %shift
}
+define <8 x i16> @constant_shift_v8i16_pairs(<8 x i16> %a) nounwind {
+; SSE2-LABEL: constant_shift_v8i16_pairs:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [512,512,u,u,8192,8192,1024,1024]
+; SSE2-NEXT: pmulhw %xmm0, %xmm1
+; SSE2-NEXT: psraw $1, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: constant_shift_v8i16_pairs:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psraw $1, %xmm1
+; SSE41-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [512,512,u,u,8192,8192,1024,1024]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v8i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [512,512,u,u,8192,8192,1024,1024]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v8i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [256,256,16384,16384,4096,4096,512,512]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v8i16_pairs:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshaw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm1 = [256,256,16384,16384,4096,4096,512,512]
+; AVX512DQ-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm1 = [7,7,1,1,3,3,6,6]
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [256,256,16384,16384,4096,4096,512,512]
+; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512DQVL-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v8i16_pairs:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [512,512,u,u,8192,8192,1024,1024]
+; X86-SSE-NEXT: pmulhw %xmm0, %xmm1
+; X86-SSE-NEXT: psraw $1, %xmm0
+; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
+; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,3]
+; X86-SSE-NEXT: retl
+ %shift = ashr <8 x i16> %a, <i16 7, i16 7, i16 1, i16 1, i16 3, i16 3, i16 6, i16 6>
+ ret <8 x i16> %shift
+}
+
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE-LABEL: constant_shift_v16i8:
; SSE: # %bb.0:
@@ -1679,6 +1761,86 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind {
ret <16 x i8> %shift
}
+define <16 x i8> @constant_shift_v16i8_quads(<16 x i8> %a) nounwind {
+; SSE2-LABEL: constant_shift_v16i8_quads:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [u,u,32768,32768,8192,8192,16384,16384]
+; SSE2-NEXT: pmulhuw %xmm0, %xmm1
+; SSE2-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE2-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: movaps {{.*#+}} xmm0 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; SSE2-NEXT: xorps %xmm0, %xmm1
+; SSE2-NEXT: psubb %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: constant_shift_v16i8_quads:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [u,u,32768,32768,8192,8192,16384,16384]
+; SSE41-NEXT: pmulhuw %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; SSE41-NEXT: pxor %xmm1, %xmm0
+; SSE41-NEXT: psubb %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v16i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [u,u,32768,32768,8192,8192,16384,16384]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v16i8_quads:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v16i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; AVX512VL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm0
+; AVX512VL-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v16i8_quads:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [u,u,32768,32768,8192,8192,16384,16384]
+; X86-SSE-NEXT: pmulhuw %xmm0, %xmm1
+; X86-SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; X86-SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE-NEXT: movaps {{.*#+}} xmm0 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; X86-SSE-NEXT: xorps %xmm0, %xmm1
+; X86-SSE-NEXT: psubb %xmm0, %xmm1
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0
+; X86-SSE-NEXT: retl
+ %shift = ashr <16 x i8> %a, <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3, i8 2, i8 2, i8 2, i8 2>
+ ret <16 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index afe12ddb0766b3..1ebefe6d0d0d3a 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -1623,6 +1623,96 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
ret <16 x i16> %shift
}
+define <16 x i16> @constant_shift_v16i16_pairs(<16 x i16> %a) nounwind {
+; AVX1-LABEL: constant_shift_v16i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [u,u,u,u,8192,8192,16384,16384]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [1024,1024,512,512,2048,2048,4096,4096]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,16384,16384,4096,4096,8192,8192,512,512,256,256,1024,1024,2048,2048]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshaw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshaw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpshaw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vpshaw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,16384,16384,4096,4096,8192,8192,512,512,256,256,1024,1024,2048,2048]
+; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,1,1,3,3,2,2,6,6,7,7,5,5,4,4]
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,16384,16384,4096,4096,8192,8192,512,512,256,256,1024,1024,2048,2048]
+; AVX512DQVL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512DQVL-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [u,u,u,u,8192,8192,16384,16384]
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [1024,1024,512,512,2048,2048,4096,4096]
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [32768,32768,16384,16384,4096,4096,8192,8192,512,512,256,256,1024,1024,2048,2048]
+; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = ashr <16 x i16> %a, <i16 0, i16 0, i16 1, i16 1, i16 3, i16 3, i16 2, i16 2, i16 6, i16 6, i16 7, i16 7, i16 5, i16 5, i16 4, i16 4>
+ ret <16 x i16> %shift
+}
+
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
@@ -1879,6 +1969,95 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
ret <32 x i8> %shift
}
+define <32 x i8> @constant_shift_v32i8_quads(<32 x i8> %a) nounwind {
+; AVX1-LABEL: constant_shift_v32i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [1024,1024,512,512,2048,2048,4096,4096]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2 # [u,u,32768,32768,8192,8192,16384,16384]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v32i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32,2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8_quads:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8_quads:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vpshab {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32,2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v32i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32,2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; AVX512VL-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
+; AVX512VL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v32i8_quads:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # [1024,1024,512,512,2048,2048,4096,4096]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; X86-AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm2 # [u,u,32768,32768,8192,8192,16384,16384]
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32]
+; X86-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v32i8_quads:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,64,64,64,64,16,16,16,16,32,32,32,32,2,2,2,2,1,1,1,1,4,4,4,4,8,8,8,8]
+; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = ashr <32 x i8> %a, <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3, i8 2, i8 2, i8 2, i8 2, i8 6, i8 6, i8 6, i8 6, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 4, i8 4, i8 4, i8 4>
+ ret <32 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
index b4c0cf9a40fc8a..b70407c0b96a42 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -360,6 +360,26 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
ret <32 x i16> %shift
}
+define <32 x i16> @constant_shift_v32i16_pairs(<32 x i16> %a) nounwind {
+; AVX512DQ-LABEL: constant_shift_v32i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [128,128,128,128,64,64,64,64,32,32,32,32,16,16,16,16,8,8,8,8,4,4,4,4,2,2,2,2,1,1,1,1]
+; AVX512DQ-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpsubw %ymm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512DQ-NEXT: vpsubw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsravw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+ %shift = ashr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 9, i16 9, i16 9, i16 9, i16 10, i16 10, i16 10, i16 10, i16 11, i16 11, i16 11, i16 11, i16 12, i16 12, i16 12, i16 12, i16 13, i16 13, i16 13, i16 13, i16 14, i16 14, i16 14, i16 14, i16 15, i16 15, i16 15, i16 15>
+ ret <32 x i16> %shift
+}
+
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ: # %bb.0:
@@ -437,6 +457,29 @@ define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
ret <64 x i8> %shift
}
+define <64 x i8> @constant_shift_v64i8_quads(<64 x i8> %a) nounwind {
+; AVX512DQ-LABEL: constant_shift_v64i8_quads:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,4,4,4,4,32,32,32,32,1,1,1,1,1,1,1,1,4,4,4,4,1,1,1,1,4,4,4,4,8,8,8,8,16,16,16,16,16,16,16,16,2,2,2,2,64,64,64,64,4,4,4,4,32,32,32,32,128,128,128,128]
+; AVX512DQ-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512DQ-NEXT: vpsubb %ymm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm0
+; AVX512DQ-NEXT: vpsubb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v64i8_quads:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,16,16,16,4,4,4,4,32,32,32,32,1,1,1,1,1,1,1,1,4,4,4,4,1,1,1,1,4,4,4,4,8,8,8,8,16,16,16,16,16,16,16,16,2,2,2,2,64,64,64,64,4,4,4,4,32,32,32,32,128,128,128,128]
+; AVX512BW-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+ %shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 5, i8 5, i8 5, i8 5, i8 2, i8 2, i8 2, i8 2, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 4, i8 4, i8 4, i8 4, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 6, i8 6, i8 6, i8 6, i8 1, i8 1, i8 1, i8 1, i8 5, i8 5, i8 5, i8 5, i8 2, i8 2, i8 2, i8 2, i8 0, i8 0, i8 0, i8 0>
+ ret <64 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index f9e132c9aa6218..4caa7da4ce136a 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -1162,6 +1162,62 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
ret <4 x i32> %shift
}
+define <8 x i16> @constant_shift_v8i16_pairs(<8 x i16> %a) nounwind {
+; SSE-LABEL: constant_shift_v8i16_pairs:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32768,32768,8192,8192,16384,16384,4096,4096]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v8i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32768,32768,8192,8192,16384,16384,4096,4096]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v8i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v8i16_pairs:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm1 = [1,1,3,3,2,2,4,4]
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v8i16_pairs:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [32768,32768,8192,8192,16384,16384,4096,4096]
+; X86-SSE-NEXT: retl
+ %shift = lshr <8 x i16> %a, <i16 1, i16 1, i16 3, i16 3, i16 2, i16 2, i16 4, i16 4>
+ ret <8 x i16> %shift
+}
+
define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; SSE2-LABEL: constant_shift_v8i16:
; SSE2: # %bb.0:
@@ -1280,6 +1336,51 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind {
ret <16 x i8> %shift
}
+define <16 x i8> @constant_shift_v16i8_quads(<16 x i8> %a) nounwind {
+; SSE-LABEL: constant_shift_v16i8_quads:
+; SSE: # %bb.0:
+; SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16384,16384,4096,4096,32768,32768,8192,8192]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v16i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [16384,16384,4096,4096,32768,32768,8192,8192]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v16i8_quads:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v16i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v16i8_quads:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [16384,16384,4096,4096,32768,32768,8192,8192]
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: retl
+ %shift = lshr <16 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 4, i8 4, i8 4, i8 4, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3>
+ ret <16 x i8> %shift
+}
+
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index e8adeeec8f7206..cc3b1a72e5b538 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -1267,6 +1267,79 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
ret <8 x i32> %shift
}
+define <16 x i16> @constant_shift_v16i16_pairs(<16 x i16> %a) nounwind {
+; AVX1-LABEL: constant_shift_v16i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [u,u,32768,32768,16384,16384,8192,8192]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4096,4096,2048,2048,1024,1024,512,512]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 # [u,u,32768,32768,16384,16384,8192,8192,4096,4096,2048,2048,1024,1024,512,512]
+; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; XOPAVX2-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [u,u,32768,32768,16384,16384,8192,8192]
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [4096,4096,2048,2048,1024,1024,512,512]
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = lshr <16 x i16> %a, <i16 0, i16 0, i16 1, i16 1, i16 2, i16 2, i16 3, i16 3, i16 4, i16 4, i16 5, i16 5, i16 6, i16 6, i16 7, i16 7>
+ ret <16 x i16> %shift
+}
+
define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX1-LABEL: constant_shift_v16i16:
; AVX1: # %bb.0:
@@ -1351,7 +1424,7 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [512,16384,4096,1024,32768,16384,8192,4096]
; AVX1-NEXT: vpmulhuw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [257,16191,3855,771,32639,16191,7967,3855]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpmulhuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
@@ -1414,7 +1487,7 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [512,16384,4096,1024,32768,16384,8192,4096]
; X86-AVX1-NEXT: vpmulhuw %xmm2, %xmm1, %xmm1
-; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [257,16191,3855,771,32639,16191,7967,3855]
+; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15]
; X86-AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; X86-AVX1-NEXT: vpmulhuw %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
@@ -1430,6 +1503,72 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
ret <32 x i8> %shift
}
+define <32 x i8> @constant_shift_v32i8_quads(<32 x i8> %a) nounwind {
+; AVX1-LABEL: constant_shift_v32i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [8192,8192,16384,16384,32768,32768,u,u]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [512,512,1024,1024,2048,2048,4096,4096]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v32i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8_quads:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8_quads:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v32i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v32i8_quads:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [8192,8192,16384,16384,32768,32768,u,u]
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-AVX1-NEXT: vpmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [512,512,1024,1024,2048,2048,4096,4096]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v32i8_quads:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 2, i8 2, i8 2, i8 2, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 7, i8 7, i8 7, i8 7, i8 6, i8 6, i8 6, i8 6, i8 5, i8 5, i8 5, i8 5, i8 4, i8 4, i8 4, i8 4>
+ ret <32 x i8> %shift
+}
+
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
index 8b61540081a7c7..81dc63ba33cb03 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
@@ -306,6 +306,21 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
ret <32 x i16> %shift
}
+define <32 x i16> @constant_shift_v32i16_pairs(<32 x i16> %a) nounwind {
+; AVX512DQ-LABEL: constant_shift_v32i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsrlvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+ %shift = lshr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 9, i16 9, i16 9, i16 9, i16 10, i16 10, i16 10, i16 10, i16 11, i16 11, i16 11, i16 11, i16 12, i16 12, i16 12, i16 12, i16 13, i16 13, i16 13, i16 13, i16 14, i16 14, i16 14, i16 14, i16 15, i16 15, i16 15, i16 15>
+ ret <32 x i16> %shift
+}
+
define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8_pairs:
; AVX512DQ: # %bb.0:
@@ -315,9 +330,9 @@ define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
; AVX512DQ-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpmulhuw %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [257,16191,3855,771,32639,16191,7967,3855,257,16191,3855,771,32639,16191,7967,3855,257,16191,3855,771,32639,16191,7967,3855,257,16191,3855,771,32639,16191,7967,3855]
+; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15,1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15,1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15,1,1,63,63,15,15,3,3,127,127,63,63,31,31,15,15]
; AVX512DQ-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512DQ-NEXT: vpandq %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpandq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v64i8_pairs:
@@ -329,6 +344,16 @@ define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
ret <64 x i8> %shift
}
+define <64 x i8> @constant_shift_v64i8_quads(<64 x i8> %a) nounwind {
+; ALL-LABEL: constant_shift_v64i8_quads:
+; ALL: # %bb.0:
+; ALL-NEXT: vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; ALL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; ALL-NEXT: retq
+ %shift = lshr <64 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 5, i8 5, i8 5, i8 5, i8 6, i8 6, i8 6, i8 6, i8 7, i8 7, i8 7, i8 7, i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 2, i8 2, i8 2, i8 2, i8 3, i8 3, i8 3, i8 3, i8 4, i8 4, i8 4, i8 4, i8 5, i8 5, i8 5, i8 5, i8 6, i8 6, i8 6, i8 6, i8 7, i8 7, i8 7, i8 7, i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 2, i8 2, i8 2, i8 2, i8 3, i8 3, i8 3, i8 3>
+ ret <64 x i8> %shift
+}
+
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 4279f08196f5c4..902bf8a0e55ce8 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -1090,6 +1090,62 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
ret <8 x i16> %shift
}
+define <8 x i16> @constant_shift_v8i16_pairs(<8 x i16> %a) nounwind {
+; SSE-LABEL: constant_shift_v8i16_pairs:
+; SSE: # %bb.0:
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [32,32,4,4,2,2,16,16]
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v8i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [32,32,4,4,2,2,16,16]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v8i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v8i16_pairs:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} xmm1 = [5,5,2,2,1,1,4,4]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v8i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v8i16_pairs:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [32,32,4,4,2,2,16,16]
+; X86-SSE-NEXT: retl
+ %shift = shl <8 x i16> %a, <i16 5, i16 5, i16 2, i16 2, i16 1, i16 1, i16 4, i16 4>
+ ret <8 x i16> %shift
+}
+
define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_shift_v16i8:
; SSE2: # %bb.0:
@@ -1242,6 +1298,51 @@ define <16 x i8> @constant_shift_v16i8_pairs(<16 x i8> %a) nounwind {
ret <16 x i8> %shift
}
+define <16 x i8> @constant_shift_v16i8_quads(<16 x i8> %a) nounwind {
+; SSE-LABEL: constant_shift_v16i8_quads:
+; SSE: # %bb.0:
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [4,4,8,8,1,1,2,2]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v16i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [4,4,8,8,1,1,2,2]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v16i8_quads:
+; XOP: # %bb.0:
+; XOP-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v16i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v16i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: retq
+;
+; X86-SSE-LABEL: constant_shift_v16i8_quads:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [4,4,8,8,1,1,2,2]
+; X86-SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: retl
+ %shift = shl <16 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 3, i8 3, i8 3, i8 3, i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 024a50d77e7c05..b4880a6cad70e3 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -1233,6 +1233,76 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
ret <16 x i16> %shift
}
+define <16 x i16> @constant_shift_v16i16_pairs(<16 x i16> %a) nounwind {
+; AVX1-LABEL: constant_shift_v16i16_pairs:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [4,4,8,8,1,1,2,2]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [64,64,128,128,16,16,32,32]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i16_pairs:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16_pairs:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0 # [4,4,8,8,1,1,2,2,64,64,128,128,16,16,32,32]
+; XOPAVX2-NEXT: retq
+;
+; AVX512DQ-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [2,2,3,3,0,0,1,1,6,6,7,7,4,4,5,5]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQVL-NEXT: retq
+;
+; AVX512BWVL-LABEL: constant_shift_v16i16_pairs:
+; AVX512BWVL: # %bb.0:
+; AVX512BWVL-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512BWVL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [4,4,8,8,1,1,2,2]
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [64,64,128,128,16,16,32,32]
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v16i16_pairs:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = shl <16 x i16> %a, <i16 2, i16 2, i16 3, i16 3, i16 0, i16 0, i16 1, i16 1, i16 6, i16 6, i16 7, i16 7, i16 4, i16 4, i16 5, i16 5>
+ ret <16 x i16> %shift
+}
+
define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_shift_v32i8:
; AVX1: # %bb.0:
@@ -1420,6 +1490,70 @@ define <32 x i8> @constant_shift_v32i8_pairs(<32 x i8> %a) nounwind {
ret <32 x i8> %shift
}
+define <32 x i8> @constant_shift_v32i8_quads(<32 x i8> %a) nounwind {
+; AVX1-LABEL: constant_shift_v32i8_quads:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 # [4,4,8,8,1,1,2,2]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [64,64,128,128,16,16,32,32]
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v32i8_quads:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8_quads:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8_quads:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vpshlb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: constant_shift_v32i8_quads:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: constant_shift_v32i8_quads:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: retq
+;
+; X86-AVX1-LABEL: constant_shift_v32i8_quads:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 # [4,4,8,8,1,1,2,2]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X86-AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # [64,64,128,128,16,16,32,32]
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: retl
+;
+; X86-AVX2-LABEL: constant_shift_v32i8_quads:
+; X86-AVX2: # %bb.0:
+; X86-AVX2-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: retl
+ %shift = shl <32 x i8> %a, <i8 2, i8 2, i8 2, i8 2, i8 3, i8 3, i8 3, i8 3, i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 6, i8 6, i8 6, i8 6, i8 7, i8 7, i8 7, i8 7, i8 4, i8 4, i8 4, i8 4, i8 5, i8 5, i8 5, i8 5>
+ ret <32 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
index f5ff4bdaecc748..cd729ebbc4330a 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -293,6 +293,21 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
ret <32 x i16> %shift
}
+define <32 x i16> @constant_shift_v32i16_pairs(<32 x i16> %a) nounwind {
+; AVX512DQ-LABEL: constant_shift_v32i16_pairs:
+; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16_pairs:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+ %shift = shl <32 x i16> %a, <i16 0, i16 0, i16 1, i16 1, i16 3, i16 3, i16 2, i16 2, i16 6, i16 6, i16 7, i16 7, i16 5, i16 5, i16 4, i16 4, i16 12, i16 12, i16 13, i16 13, i16 15, i16 15, i16 14, i16 14, i16 10, i16 10, i16 11, i16 11, i16 9, i16 9, i16 8, i16 8>
+ ret <32 x i16> %shift
+}
+
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ: # %bb.0:
@@ -342,6 +357,16 @@ define <64 x i8> @constant_shift_v64i8_pairs(<64 x i8> %a) nounwind {
ret <64 x i8> %shift
}
+define <64 x i8> @constant_shift_v64i8_quads(<64 x i8> %a) nounwind {
+; ALL-LABEL: constant_shift_v64i8_quads:
+; ALL: # %bb.0:
+; ALL-NEXT: vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; ALL-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; ALL-NEXT: retq
+ %shift = shl <64 x i8> %a, <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 3, i8 3, i8 3, i8 3, i8 2, i8 2, i8 2, i8 2, i8 6, i8 6, i8 6, i8 6, i8 7, i8 7, i8 7, i8 7, i8 5, i8 5, i8 5, i8 5, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 5, i8 5, i8 5, i8 5, i8 7, i8 7, i8 7, i8 7, i8 6, i8 6, i8 6, i8 6, i8 2, i8 2, i8 2, i8 2, i8 3, i8 3, i8 3, i8 3, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0>
+ ret <64 x i8> %shift
+}
+
;
; Uniform Constant Shifts
;