[llvm] r340813 - [X86][SSE] Improve variable scalar shift of vXi8 vectors (PR34694)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 28 03:37:29 PDT 2018
Author: rksimon
Date: Tue Aug 28 03:37:29 2018
New Revision: 340813
URL: http://llvm.org/viewvc/llvm-project?rev=340813&view=rev
Log:
[X86][SSE] Improve variable scalar shift of vXi8 vectors (PR34694)
This patch creates both the shift mask and the actual shift using the vXi16 vector shift ops, then masks away the bits that spill across byte boundaries.
Differential Revision: https://reviews.llvm.org/D51263
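
As a rough scalar model of the trick (illustrative only; these helpers are made up and are not the DAG code): a uniform per-byte shift can be done with the 16-bit shift hardware by masking away the bits that leak across the byte boundary, and the keep-mask itself falls out of shifting an all-ones byte by the same amount.

#include <cassert>
#include <cstdint>

// Two bytes packed into one 16-bit lane, shifted together.
uint16_t lshr_two_bytes(uint16_t Lanes, unsigned Amt) {
  assert(Amt < 8 && "uniform per-byte shift amount");
  uint16_t Shifted = uint16_t(Lanes >> Amt); // high byte leaks into low byte
  uint8_t Keep = uint8_t(0xFFu >> Amt);      // per-byte keep-mask
  return uint16_t(Shifted & (uint16_t(Keep) * 0x0101u)); // splat mask to both bytes
}

uint16_t shl_two_bytes(uint16_t Lanes, unsigned Amt) {
  assert(Amt < 8 && "uniform per-byte shift amount");
  uint16_t Shifted = uint16_t(Lanes << Amt); // low byte leaks into high byte
  uint8_t Keep = uint8_t(0xFFu << Amt);      // leak goes the other way for SHL
  return uint16_t(Shifted & (uint16_t(Keep) * 0x0101u));
}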
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Aug 28 03:37:29 2018
@@ -20445,23 +20445,24 @@ static SDValue getTargetVShiftNode(unsig
// Need to build a vector containing shift amount.
// SSE/AVX packed shifts only use the lower 64-bit of the shift count.
- // +=================+============+=======================================+
- // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
- // +=================+============+=======================================+
- // | i64 | Yes, No | Use ShAmt as lowest elt |
- // | i32 | Yes | zero-extend in-reg |
- // | (i32 zext(i16)) | Yes | zero-extend in-reg |
- // | (i32 zext(i16)) | No | byte-shift-in-reg |
- // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
- // +=================+============+=======================================+
+ // +====================+============+=======================================+
+ // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
+ // +====================+============+=======================================+
+ // | i64 | Yes, No | Use ShAmt as lowest elt |
+ // | i32 | Yes | zero-extend in-reg |
+ // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
+ // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
+ // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
+ // +====================+============+=======================================+
if (SVT == MVT::i64)
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
+ (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
+ ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
ShAmt = ShAmt.getOperand(0);
- MVT AmtTy = MVT::v8i16;
+ MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
if (Subtarget.hasSSE41())
ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64);
@@ -23467,8 +23468,8 @@ static SDValue LowerScalarVariableShift(
Amt = peekThroughEXTRACT_SUBVECTORs(Amt);
- if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
- if (SDValue BaseShAmt = IsSplatValue(VT, Amt, dl, DAG, Subtarget, Opcode)) {
+ if (SDValue BaseShAmt = IsSplatValue(VT, Amt, dl, DAG, Subtarget, Opcode)) {
+ if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
MVT EltVT = VT.getVectorElementType();
assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
@@ -23478,6 +23479,50 @@ static SDValue LowerScalarVariableShift(
return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
}
+
+ // vXi8 shifts - shift as v8i16 + mask result.
+ if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
+ (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
+ VT == MVT::v64i8) &&
+ !Subtarget.hasXOP()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
+ if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
+ unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
+ unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
+ BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
+
+ // Create the mask using vXi16 shifts. For shift-rights we need to move
+ // the upper byte down before splatting the vXi8 mask.
+ SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
+ BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
+ BaseShAmt, Subtarget, DAG);
+ if (Opcode != ISD::SHL)
+ BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
+ 8, DAG);
+ BitMask = DAG.getBitcast(VT, BitMask);
+ BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
+ SmallVector<int, 64>(NumElts, 0));
+
+ SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
+ DAG.getBitcast(ExtVT, R), BaseShAmt,
+ Subtarget, DAG);
+ Res = DAG.getBitcast(VT, Res);
+ Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
+
+ if (Opcode == ISD::SRA) {
+ // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
+ // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
+ SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
+ SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
+ BaseShAmt, Subtarget, DAG);
+ SignMask = DAG.getBitcast(VT, SignMask);
+ Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
+ Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
+ }
+ return Res;
+ }
+ }
}
// Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
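
For reference, a hedged scalar check of the SRA identity used in the block above (the helper name is made up):

#include <cstdint>

// ashr(x, s) == sub(xor(lshr(x, s), m), m) with m = 0x80 >> s: the shifted
// sign bit is flipped and then sign-extended by the subtract. Shifting the
// 0x8080 vXi16 constant with PSRLW is safe since 0x80 >> s stays inside its
// byte for s in [0,7].
uint8_t ashr_byte(uint8_t X, unsigned Amt) {
  uint8_t L = uint8_t(X >> Amt);     // logical shift right
  uint8_t M = uint8_t(0x80u >> Amt); // shifted sign-bit mask
  return uint8_t((L ^ M) - M);       // equals (int8_t)X >> Amt for Amt in [0,7]
}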
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll Tue Aug 28 03:37:29 2018
@@ -467,74 +467,42 @@ define <64 x i8> @splatvar_rotate_v64i8(
;
; AVX512BW-LABEL: splatvar_rotate_v64i8:
; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm2
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm3
+; AVX512BW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm4, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
-; AVX512BW-NEXT: vpsllw $2, %zmm3, %zmm4
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
-; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm1
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k2
-; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpandq %zmm1, %zmm3, %zmm1
+; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpsrlw %xmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw %xmm2, %zmm4, %zmm2
+; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
+; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v64i8:
; AVX512VLBW: # %bb.0:
+; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm2
+; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %zmm2, %zmm3, %zmm2
+; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm0, %zmm3
+; AVX512VLBW-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4
+; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm4, %zmm1
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
-; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm3, %zmm3
-; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k1}
-; AVX512VLBW-NEXT: vpsllw $2, %zmm3, %zmm4
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm4, %zmm4
-; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VLBW-NEXT: vmovdqu8 %zmm4, %zmm3 {%k1}
-; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1}
-; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm1
-; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
-; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2
-; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2
-; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
-; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT: vpandq %zmm1, %zmm3, %zmm1
+; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512VLBW-NEXT: vpsrlw %xmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpsrlw %xmm2, %zmm4, %zmm2
+; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpbroadcastb %xmm2, %zmm2
+; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%splat8 = sub <64 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
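
(A hedged scalar model of the new rotate sequence above, with a made-up name: the splatted byte rotate becomes the OR of the masked 16-bit left shift by Amt and the masked 16-bit right shift by 8 - Amt, reusing the byte-mask scheme from the shift lowering.)

#include <cstdint>

uint16_t rotl_two_bytes(uint16_t Lanes, unsigned Amt) { // Amt in [0,7]
  unsigned Inv = 8 - Amt;
  uint16_t ShlMask = uint16_t(uint8_t(0xFFu << Amt)) * 0x0101u; // keep-mask, shl half
  uint16_t ShrMask = uint16_t(uint8_t(0xFFu >> Inv)) * 0x0101u; // keep-mask, lshr half
  return uint16_t(((Lanes << Amt) & ShlMask) | ((Lanes >> Inv) & ShrMask));
}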
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll Tue Aug 28 03:37:29 2018
@@ -785,160 +785,63 @@ define <8 x i16> @splatvar_shift_v8i16(<
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE2-NEXT: psllw $5, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm1, %xmm6
-; SSE2-NEXT: psraw $4, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm1, %xmm6
-; SSE2-NEXT: psraw $2, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: paddw %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtw %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: psraw $1, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $4, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm5
-; SSE2-NEXT: psraw $2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: paddw %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtw %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psraw $1, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: psrlw %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psrlw %xmm1, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm1, %xmm0
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; SSE2-NEXT: psrlw %xmm1, %xmm2
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: psubb %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pshufb %xmm0, %xmm1
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $2, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psraw $1, %xmm4
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: paddw %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psrlw %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: psrlw %xmm1, %xmm2
+; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; SSE41-NEXT: psrlw %xmm1, %xmm2
+; SSE41-NEXT: pxor %xmm2, %xmm0
+; SSE41-NEXT: psubb %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX2-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX2-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
; AVX2-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX2-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX2-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpbroadcastb %xmm2, %xmm2
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
@@ -1000,63 +903,20 @@ define <16 x i8> @splatvar_shift_v16i8(<
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; X32-SSE-NEXT: psllw $5, %xmm3
-; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm1, %xmm6
-; X32-SSE-NEXT: psraw $4, %xmm1
-; X32-SSE-NEXT: pand %xmm5, %xmm1
-; X32-SSE-NEXT: por %xmm6, %xmm1
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm1, %xmm6
-; X32-SSE-NEXT: psraw $2, %xmm1
-; X32-SSE-NEXT: pand %xmm5, %xmm1
-; X32-SSE-NEXT: por %xmm6, %xmm1
-; X32-SSE-NEXT: paddw %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtw %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm4
-; X32-SSE-NEXT: pandn %xmm1, %xmm4
-; X32-SSE-NEXT: psraw $1, %xmm1
-; X32-SSE-NEXT: pand %xmm5, %xmm1
-; X32-SSE-NEXT: por %xmm4, %xmm1
-; X32-SSE-NEXT: psrlw $8, %xmm1
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm5
-; X32-SSE-NEXT: psraw $2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm5, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtw %xmm3, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psraw $1, %xmm0
+; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: psrlw %xmm1, %xmm0
+; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT: psrlw %xmm1, %xmm2
+; X32-SSE-NEXT: psrlw $8, %xmm2
+; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: packuswb %xmm1, %xmm0
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; X32-SSE-NEXT: psrlw %xmm1, %xmm2
+; X32-SSE-NEXT: pxor %xmm2, %xmm0
+; X32-SSE-NEXT: psubb %xmm2, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i8> %a, %splat
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll Tue Aug 28 03:37:29 2018
@@ -860,80 +860,37 @@ define <16 x i16> @splatvar_shift_v16i16
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
-; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
-; AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm9
-; AVX1-NEXT: vpblendvb %xmm9, %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm8
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; AVX1-NEXT: vpsraw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm8, %xmm3, %xmm8
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpsraw $4, %xmm5, %xmm3
-; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm5, %xmm2
-; AVX1-NEXT: vpsraw $2, %xmm2, %xmm3
-; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsraw $1, %xmm2, %xmm3
-; AVX1-NEXT: vpblendvb %xmm9, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX1-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
@@ -962,31 +919,17 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
@@ -1000,31 +943,17 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
; AVX512DQVL: # %bb.0:
-; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQVL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQVL-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512DQVL-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512DQVL-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512DQVL-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512DQVL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512DQVL-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQVL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQVL-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512DQVL-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512DQVL-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX512DQVL-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
@@ -1038,80 +967,37 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
-; X32-AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm5, %xmm4, %xmm4
-; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X32-AVX1-NEXT: vpsraw $4, %xmm5, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
-; X32-AVX1-NEXT: vpsraw $2, %xmm4, %xmm6
-; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
-; X32-AVX1-NEXT: vpsraw $2, %xmm5, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm5
-; X32-AVX1-NEXT: vpsraw $1, %xmm4, %xmm6
-; X32-AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
-; X32-AVX1-NEXT: vpsraw $1, %xmm5, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm5, %xmm2
-; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; X32-AVX1-NEXT: vpsraw $4, %xmm3, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsraw $2, %xmm3, %xmm5
-; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsraw $1, %xmm3, %xmm5
-; X32-AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm5
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsrlw $8, %xmm4, %xmm1
-; X32-AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
-; X32-AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [32896,32896,32896,32896,32896,32896,32896,32896]
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubb %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; X32-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; X32-AVX2-NEXT: vpsraw $4, %ymm3, %ymm4
-; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; X32-AVX2-NEXT: vpsraw $2, %ymm3, %ymm4
-; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; X32-AVX2-NEXT: vpsraw $1, %ymm3, %ymm4
-; X32-AVX2-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; X32-AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; X32-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; X32-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; X32-AVX2-NEXT: vpsraw $4, %ymm0, %ymm3
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsraw $2, %ymm0, %ymm3
-; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsraw $1, %ymm0, %ymm3
-; X32-AVX2-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; X32-AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; X32-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll Tue Aug 28 03:37:29 2018
@@ -181,83 +181,36 @@ define <32 x i16> @splatvar_shift_v32i16
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
-; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpsraw $2, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm3, %ymm3, %ymm6
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpsraw $1, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm6, %ymm6, %ymm7
-; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm8
-; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpaddw %ymm8, %ymm8, %ymm9
-; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackuswb %ymm4, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512DQ-NEXT: vpsraw $4, %ymm4, %ymm5
-; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512DQ-NEXT: vpsraw $4, %ymm1, %ymm4
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsraw $2, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsraw $1, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpbroadcastb %xmm3, %ymm3
+; AVX512DQ-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsubb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpxor %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsubb %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
+; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpsraw $4, %zmm0, %zmm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpbroadcastb %xmm2, %zmm2
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896,32896]
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpxorq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = ashr <64 x i8> %a, %splat
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll Tue Aug 28 03:37:29 2018
@@ -652,98 +652,47 @@ define <8 x i16> @splatvar_shift_v8i16(<
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
-; SSE2-NEXT: psllw $5, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: psrlw %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psrlw %xmm1, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pshufb %xmm0, %xmm1
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm4
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psrlw $2, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psrlw $1, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: paddb %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psrlw %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: psrlw %xmm1, %xmm2
+; SSE41-NEXT: pshufb {{.*#+}} xmm2 = xmm2[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsrlw %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
@@ -805,36 +754,16 @@ define <16 x i8> @splatvar_shift_v16i8(<
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: psllw $5, %xmm2
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: psrlw %xmm1, %xmm0
+; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT: psrlw %xmm1, %xmm2
+; X32-SSE-NEXT: psrlw $8, %xmm2
+; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i8> %a, %splat
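
As the checks above show, the splatted vXi8 lshr now lowers to a single vXi16 shift plus a mask. Roughly, the SSE4.1 output corresponds to the following C sketch (illustrative only: the helper name splat_lshr_v16i8 is invented here, and SSSE3 is assumed for the pshufb broadcast):

  #include <tmmintrin.h>  /* SSSE3 and below */

  /* Hypothetical equivalent of the new splatvar_shift_v16i8 lowering:
     shift all 16 bytes right by the same variable amount. */
  static __m128i splat_lshr_v16i8(__m128i a, unsigned amt) {
      __m128i cnt  = _mm_cvtsi32_si128((int)amt);   /* count in the low qword */
      __m128i r    = _mm_srl_epi16(a, cnt);         /* psrlw: shift v8i16 lanes */
      __m128i ones = _mm_cmpeq_epi32(cnt, cnt);     /* all-ones vector */
      __m128i m    = _mm_srl_epi16(ones, cnt);      /* each word = 0xFFFF >> amt */
      m = _mm_shuffle_epi8(m, _mm_set1_epi8(1));    /* byte 1 holds 0xFF >> amt */
      return _mm_and_si128(r, m);                   /* clear bits leaked in from
                                                       the higher neighbour byte */
  }

The mask clears the bits each byte inherits from its higher neighbour during the 16-bit shift, which is what replaces the old psllw $5 + pblendvb ladder.
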
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll Tue Aug 28 03:37:29 2018
@@ -704,51 +704,27 @@ define <16 x i16> @splatvar_shift_v16i16
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm6, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
@@ -777,19 +753,13 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
@@ -803,19 +773,13 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; AVX512DQVL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQVL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
@@ -829,51 +793,27 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; X32-AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpsrlw $2, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
-; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsrlw $2, %xmm0, %xmm4
-; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; X32-AVX1-NEXT: vpsrlw $1, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm4
-; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsrlw $1, %xmm0, %xmm4
-; X32-AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
; X32-AVX2: # %bb.0:
+; X32-AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm1
+; X32-AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; X32-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
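
The same trick scales directly to 256 bits on AVX2, where the shift count still lives in an xmm register. A hypothetical intrinsics equivalent (helper name invented for illustration):

  #include <immintrin.h>  /* AVX2 */

  static __m256i splat_lshr_v32i8(__m256i a, unsigned amt) {
      __m128i cnt  = _mm_cvtsi32_si128((int)amt);   /* count in the low qword */
      __m256i r    = _mm256_srl_epi16(a, cnt);      /* vpsrlw ymm, ymm, xmm */
      __m256i ones = _mm256_set1_epi32(-1);         /* vpcmpeqd all-ones */
      __m256i m    = _mm256_srl_epi16(ones, cnt);   /* each word = 0xFFFF >> amt */
      m = _mm256_srli_epi16(m, 8);                  /* move the mask into byte 0 */
      m = _mm256_broadcastb_epi8(_mm256_castsi256_si128(m)); /* vpbroadcastb */
      return _mm256_and_si256(r, m);                /* drop the leaked-in bits */
  }

Note that the AVX1 path keeps the work in xmm halves and reuses the same 128-bit byte mask for both halves before the vinsertf128.
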
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll Tue Aug 28 03:37:29 2018
@@ -145,51 +145,26 @@ define <32 x i16> @splatvar_shift_v32i16
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
-; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm3
-; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpbroadcastb %xmm3, %ymm3
+; AVX512DQ-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsrlw %xmm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = lshr <64 x i8> %a, %splat
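
One detail in the 512-bit output: AVX-512 integer compares write to mask registers rather than vectors, so the AVX512BW path materializes the all-ones zmm with vpternlogd $255 (a ternary-logic immediate of 0xFF yields all-ones regardless of its inputs); the per-byte mask is then still broadcast with vpbroadcastb and applied with a plain vpandq.
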
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll Tue Aug 28 03:37:29 2018
@@ -569,94 +569,47 @@ define <8 x i16> @splatvar_shift_v8i16(<
define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_shift_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
-; SSE2-NEXT: psllw $5, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psllw $4, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psllw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: paddb %xmm0, %xmm0
+; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE2-NEXT: psllw %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT: psllw %xmm1, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_shift_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pshufb %xmm0, %xmm1
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: paddb %xmm1, %xmm3
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psllw $4, %xmm4
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psllw $2, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT: psllw %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE41-NEXT: psllw %xmm1, %xmm2
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pshufb %xmm1, %xmm2
+; SSE41-NEXT: pand %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_shift_v16i8:
; AVX1: # %bb.0:
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm1
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v16i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpsllw %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX2-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v16i8:
@@ -715,35 +668,15 @@ define <16 x i8> @splatvar_shift_v16i8(<
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: psllw $5, %xmm2
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: paddb %xmm0, %xmm0
+; X32-SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
+; X32-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; X32-SSE-NEXT: psllw %xmm1, %xmm0
+; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2
+; X32-SSE-NEXT: psllw %xmm1, %xmm2
+; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i8> %a, %splat
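
The shl lowering mirrors the lshr one, except the per-byte mask already sits in byte 0 of each word after the psllw of all-ones (0xFFFF << amt has 0xFF << amt in its low byte), so no extra psrlw $8 is needed before the broadcast. A C sketch under the same assumptions as before (invented helper name, SSSE3 for pshufb):

  #include <tmmintrin.h>  /* SSSE3 and below */

  static __m128i splat_shl_v16i8(__m128i a, unsigned amt) {
      __m128i cnt  = _mm_cvtsi32_si128((int)amt);   /* count in the low qword */
      __m128i r    = _mm_sll_epi16(a, cnt);         /* psllw: shift v8i16 lanes */
      __m128i ones = _mm_cmpeq_epi32(cnt, cnt);     /* all-ones vector */
      __m128i m    = _mm_sll_epi16(ones, cnt);      /* each word = 0xFFFF << amt */
      m = _mm_shuffle_epi8(m, _mm_setzero_si128()); /* broadcast byte 0 = 0xFF << amt */
      return _mm_and_si128(r, m);                   /* clear the low bits leaked in
                                                       from the lower neighbour byte */
  }

This is why the shl variants below broadcast byte 0 (pxor + pshufb, or vpbroadcastb directly) where the lshr variants first shift the mask right by 8.
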
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll Tue Aug 28 03:37:29 2018
@@ -636,47 +636,27 @@ define <16 x i16> @splatvar_shift_v16i16
define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: splatvar_shift_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm6, %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_shift_v32i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX2-NEXT: vpsllw %xmm1, %ymm2, %ymm1
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_shift_v32i8:
@@ -701,18 +681,12 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQ-LABEL: splatvar_shift_v32i8:
; AVX512DQ: # %bb.0:
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw %xmm1, %ymm2, %ymm1
; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i8:
@@ -726,18 +700,12 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; AVX512DQVL-LABEL: splatvar_shift_v32i8:
; AVX512DQVL: # %bb.0:
+; AVX512DQVL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQVL-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512DQVL-NEXT: vpsllw %xmm1, %ymm2, %ymm1
; AVX512DQVL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512DQVL-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512DQVL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512DQVL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512DQVL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQVL-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
;
; AVX512BWVL-LABEL: splatvar_shift_v32i8:
@@ -751,47 +719,27 @@ define <32 x i8> @splatvar_shift_v32i8(<
;
; X32-AVX1-LABEL: splatvar_shift_v32i8:
; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; X32-AVX1-NEXT: vpsllw $4, %xmm2, %xmm3
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsllw $2, %xmm2, %xmm3
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; X32-AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; X32-AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm1
-; X32-AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm1, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm1
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsllw %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X32-AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-AVX1-NEXT: retl
;
; X32-AVX2-LABEL: splatvar_shift_v32i8:
; X32-AVX2: # %bb.0:
+; X32-AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
+; X32-AVX2-NEXT: vpsllw %xmm1, %ymm2, %ymm1
; X32-AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
-; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; X32-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; X32-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-AVX2-NEXT: retl
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll?rev=340813&r1=340812&r2=340813&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll Tue Aug 28 03:37:29 2018
@@ -140,46 +140,24 @@ define <32 x i16> @splatvar_shift_v32i16
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
-; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm7
-; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsllw $4, %ymm1, %ymm3
-; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsllw $2, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm2
-; AVX512DQ-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpbroadcastb %xmm3, %ymm3
+; AVX512DQ-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v64i8:
; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsllw %xmm1, %zmm2, %zmm1
; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
-; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsllw $2, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = shl <64 x i8> %a, %splat