[llvm] r335957 - [X86][SSE] Support v16i8/v32i8 vector rotations
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 29 02:36:39 PDT 2018
Author: rksimon
Date: Fri Jun 29 02:36:39 2018
New Revision: 335957
URL: http://llvm.org/viewvc/llvm-project?rev=335957&view=rev
Log:
[X86][SSE] Support v16i8/v32i8 vector rotations
This uses the same technique as for shifts - split the rotation into 4/2/1-bit partial rotations and select those partials based on the amount bit, making use of PBLENDVB if available. This halves the number of PBLENDVB ops compared to expanding back to shifts, which helps as PBLENDVB can be a slow op.
Unfortunately I haven't found a decent way to share much of this code with the shift equivalent.
Differential Revision: https://reviews.llvm.org/D48655
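For reference, the per-lane effect of the new lowering can be modelled in scalar C++ - a minimal sketch only (the helper name is hypothetical; the real code builds SelectionDAG nodes in LowerRotate below):

  #include <cstdint>

  // Rotate one byte left by a 3-bit amount using 4/2/1-bit partial rotates.
  // Shifting the amount left by 5 puts bit 2 in the sign bit - the only bit
  // PBLENDVB looks at - and each 'a += a' stage exposes the next bit.
  static uint8_t rotl8_staged(uint8_t R, uint8_t Amt) {
    uint8_t A = uint8_t(Amt << 5);
    if (A & 0x80)                        // amount bit 2: apply rot4
      R = uint8_t((R << 4) | (R >> 4));
    A = uint8_t(A + A);
    if (A & 0x80)                        // amount bit 1: apply rot2
      R = uint8_t((R << 2) | (R >> 6));
    A = uint8_t(A + A);
    if (A & 0x80)                        // amount bit 0: apply rot1
      R = uint8_t((R << 1) | (R >> 7));
    return R;
  }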
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=335957&r1=335956&r2=335957&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jun 29 02:36:39 2018
@@ -907,6 +907,7 @@ X86TargetLowering::X86TargetLowering(con
setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
+ setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
@@ -1040,6 +1041,7 @@ X86TargetLowering::X86TargetLowering(con
setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
+ setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
@@ -23869,9 +23871,10 @@ static SDValue LowerRotate(SDValue Op, c
if (VT.is256BitVector() && !Subtarget.hasAVX2())
return Lower256IntArith(Op, DAG);
- assert((VT == MVT::v4i32 || VT == MVT::v8i16 ||
- ((VT == MVT::v8i32 || VT == MVT::v16i16) && Subtarget.hasAVX2())) &&
- "Only v4i32/v8i16/v8i32/v16i16 vector rotates supported");
+ assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
+ ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
+ Subtarget.hasAVX2())) &&
+ "Only vXi32/vXi16/vXi8 vector rotates supported");
// Rotate by a uniform constant - expand back to shifts.
// TODO - legalizers should be able to handle this.
@@ -23882,17 +23885,17 @@ static SDValue LowerRotate(SDValue Op, c
if (RotateAmt == 0)
return R;
- SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, VT, R,
- RotateAmt, DAG);
- SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, VT, R,
- EltSizeInBits - RotateAmt, DAG);
+ SDValue AmtR = DAG.getConstant(EltSizeInBits - RotateAmt, DL, VT);
+ SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
}
// Rotate by splat - expand back to shifts.
// TODO - legalizers should be able to handle this.
- if (IsSplatValue(VT, Amt, DL, DAG, Subtarget, Opcode)) {
+ if ((EltSizeInBits >= 16 || Subtarget.hasBWI()) &&
+ IsSplatValue(VT, Amt, DL, DAG, Subtarget, Opcode)) {
SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
@@ -23900,6 +23903,72 @@ static SDValue LowerRotate(SDValue Op, c
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
+ // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
+ // the amount bit.
+ if (EltSizeInBits == 8) {
+ if (Subtarget.hasBWI()) {
+ SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
+ AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
+ SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
+ return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
+ }
+
+ MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
+
+ auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
+ if (Subtarget.hasSSE41()) {
+ // On SSE41 targets we make use of the fact that VSELECT lowers
+ // to PBLENDVB which selects bytes based just on the sign bit.
+ V0 = DAG.getBitcast(VT, V0);
+ V1 = DAG.getBitcast(VT, V1);
+ Sel = DAG.getBitcast(VT, Sel);
+ return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
+ }
+ // On pre-SSE41 targets we test for the sign bit by comparing to
+ // zero - a negative value will set all bits of the lanes to true
+ // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
+ SDValue Z = getZeroVector(SelVT, Subtarget, DAG, DL);
+ SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
+ return DAG.getSelect(DL, SelVT, C, V0, V1);
+ };
+
+ // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
+ // We can safely do this using i16 shifts as we're only interested in
+ // the 3 lower bits of each byte.
+ Amt = DAG.getBitcast(ExtVT, Amt);
+ Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
+ Amt = DAG.getBitcast(VT, Amt);
+
+ // r = VSELECT(r, rot(r, 4), a);
+ SDValue M;
+ M = DAG.getNode(
+ ISD::OR, DL, VT,
+ DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
+ DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
+ R = SignBitSelect(VT, Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
+
+ // r = VSELECT(r, rot(r, 2), a);
+ M = DAG.getNode(
+ ISD::OR, DL, VT,
+ DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
+ DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
+ R = SignBitSelect(VT, Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
+
+ // return VSELECT(r, rot(r, 1), a);
+ M = DAG.getNode(
+ ISD::OR, DL, VT,
+ DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
+ DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
+ return SignBitSelect(VT, Amt, M, R);
+ }
+
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
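For the pre-SSE41 path, the SignBitSelect lambda above emulates PBLENDVB's byte select: PCMPGTB against zero turns each lane's sign bit into an all-ones/all-zeros mask, and VSELECT's OR(AND(V0,C),AND(V1,~C)) lowering does the blend. A scalar model of one byte lane of that step, under the same illustrative-only caveat:

  #include <cstdint>

  // Select V0 where the sign bit of Sel is set, else V1, using only
  // compare/and/andnot/or - one lane of the PCMPGTB-based blend.
  static uint8_t signBitSelect(uint8_t Sel, uint8_t V0, uint8_t V1) {
    uint8_t Mask = (int8_t)Sel < 0 ? 0xFF : 0x00; // PCMPGTB(0, Sel)
    return uint8_t((V0 & Mask) | (V1 & uint8_t(~Mask)));
  }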
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll?rev=335957&r1=335956&r2=335957&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll Fri Jun 29 02:36:39 2018
@@ -425,198 +425,181 @@ define <8 x i16> @var_rotate_v8i16(<8 x
define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: var_rotate_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; SSE2-NEXT: psubb %xmm1, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psllw $5, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: psllw $4, %xmm5
; SSE2-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE2-NEXT: pand %xmm2, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: por %xmm5, %xmm2
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm2, %xmm6
-; SSE2-NEXT: psllw $2, %xmm2
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pandn %xmm2, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: psrlw $6, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: por %xmm6, %xmm2
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pand %xmm5, %xmm2
-; SSE2-NEXT: psllw $5, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
+; SSE2-NEXT: psllw $2, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: por %xmm2, %xmm4
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm4
+; SSE2-NEXT: pandn %xmm3, %xmm2
+; SSE2-NEXT: por %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: paddb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $7, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: var_rotate_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; SSE41-NEXT: psubb %xmm3, %xmm2
-; SSE41-NEXT: psllw $5, %xmm3
-; SSE41-NEXT: movdqa %xmm0, %xmm5
-; SSE41-NEXT: psllw $4, %xmm5
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: movdqa %xmm0, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: psllw $2, %xmm5
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE41-NEXT: paddb %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm4, %xmm5
-; SSE41-NEXT: paddb %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psllw $4, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: psllw $5, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm1, %xmm5
-; SSE41-NEXT: psrlw $4, %xmm5
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: paddb %xmm3, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: por %xmm4, %xmm1
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: psrlw $6, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psllw $2, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrlw $7, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_rotate_v16i8:
; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpsllw $4, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpsllw $2, %xmm3, %xmm4
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm3
-; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm4
+; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm1
-; AVX-NEXT: vpsllw $5, %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm4
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
-; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v16i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
-; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512F-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v16i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
-; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
-; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512VL-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v16i8:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpsllvw %ymm2, %ymm0, %ymm2
+; AVX512VLBW-NEXT: vpmovwb %ymm2, %xmm2
+; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
@@ -627,62 +610,45 @@ define <16 x i8> @var_rotate_v16i8(<16 x
;
; X32-SSE-LABEL: var_rotate_v16i8:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; X32-SSE-NEXT: psubb %xmm1, %xmm4
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: psllw $5, %xmm1
+; X32-SSE-NEXT: pxor %xmm0, %xmm0
; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3
+; X32-SSE-NEXT: movdqa %xmm2, %xmm4
+; X32-SSE-NEXT: psrlw $4, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: movdqa %xmm2, %xmm5
; X32-SSE-NEXT: psllw $4, %xmm5
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5
-; X32-SSE-NEXT: pand %xmm2, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: por %xmm5, %xmm2
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm2, %xmm6
-; X32-SSE-NEXT: psllw $2, %xmm2
+; X32-SSE-NEXT: por %xmm4, %xmm5
+; X32-SSE-NEXT: pand %xmm3, %xmm5
+; X32-SSE-NEXT: pandn %xmm2, %xmm3
+; X32-SSE-NEXT: por %xmm5, %xmm3
+; X32-SSE-NEXT: movdqa %xmm3, %xmm2
+; X32-SSE-NEXT: psrlw $6, %xmm2
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: por %xmm6, %xmm2
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm1
-; X32-SSE-NEXT: pandn %xmm2, %xmm1
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pand %xmm5, %xmm2
-; X32-SSE-NEXT: psllw $5, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm3
; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
+; X32-SSE-NEXT: psllw $2, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: por %xmm2, %xmm4
+; X32-SSE-NEXT: paddb %xmm1, %xmm1
+; X32-SSE-NEXT: pxor %xmm2, %xmm2
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
+; X32-SSE-NEXT: pand %xmm2, %xmm4
+; X32-SSE-NEXT: pandn %xmm3, %xmm2
+; X32-SSE-NEXT: por %xmm4, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: paddb %xmm2, %xmm3
+; X32-SSE-NEXT: movdqa %xmm2, %xmm4
+; X32-SSE-NEXT: psrlw $7, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: por %xmm3, %xmm4
+; X32-SSE-NEXT: paddb %xmm1, %xmm1
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; X32-SSE-NEXT: pand %xmm0, %xmm4
+; X32-SSE-NEXT: pandn %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
; X32-SSE-NEXT: retl
%b8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %b
%shl = shl <16 x i8> %a, %b
@@ -1050,240 +1016,218 @@ define <8 x i16> @splatvar_rotate_v8i16(
define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v16i8:
; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; SSE2-NEXT: psubb %xmm4, %xmm3
-; SSE2-NEXT: psllw $5, %xmm4
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: psllw $4, %xmm5
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm5
-; SSE2-NEXT: pand %xmm1, %xmm5
-; SSE2-NEXT: pandn %xmm0, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm1, %xmm6
-; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: por %xmm6, %xmm1
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: pandn %xmm1, %xmm4
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pand %xmm5, %xmm1
-; SSE2-NEXT: psllw $5, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; SSE2-NEXT: movdqa %xmm2, %xmm0
; SSE2-NEXT: psrlw $4, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: pandn %xmm0, %xmm3
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
+; SSE2-NEXT: psllw $4, %xmm3
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: psllw $5, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: pandn %xmm2, %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: psrlw $6, %xmm2
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: psllw $2, %xmm3
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT: por %xmm2, %xmm3
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm4, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm3
+; SSE2-NEXT: paddb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: psrlw $7, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: paddb %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm2, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: splatvar_rotate_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pshufb %xmm0, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; SSE41-NEXT: psubb %xmm1, %xmm2
-; SSE41-NEXT: psllw $5, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: paddb %xmm1, %xmm4
-; SSE41-NEXT: movdqa %xmm3, %xmm6
-; SSE41-NEXT: psllw $4, %xmm6
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm6
-; SSE41-NEXT: movdqa %xmm3, %xmm5
+; SSE41-NEXT: pshufb %xmm0, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm6, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: psllw $2, %xmm1
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
-; SSE41-NEXT: movdqa %xmm5, %xmm1
-; SSE41-NEXT: paddb %xmm5, %xmm1
-; SSE41-NEXT: paddb %xmm4, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm5
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psllw $4, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: psllw $5, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm3, %xmm4
-; SSE41-NEXT: psrlw $4, %xmm4
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
; SSE41-NEXT: movdqa %xmm2, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: paddb %xmm1, %xmm1
+; SSE41-NEXT: psrlw $6, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psllw $2, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psrlw $7, %xmm3
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm0, %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm3
-; SSE41-NEXT: por %xmm5, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: splatvar_rotate_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm4
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm4
-; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $5, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm2
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpsllw $4, %xmm0, %xmm3
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
-; AVX2-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm3
-; AVX2-NEXT: vpsllw $2, %xmm3, %xmm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
+; AVX2-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm4
+; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm1
-; AVX2-NEXT: vpsllw $5, %xmm2, %xmm2
-; AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm3
-; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm4, %xmm4
-; AVX2-NEXT: vpblendvb %xmm2, %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
-; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512F-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v16i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
-; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm1
-; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
-; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
-; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpsllw $5, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm0, %xmm0, %xmm2
+; AVX512VL-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpaddb %xmm1, %xmm1, %xmm1
+; AVX512VL-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v16i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512BW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm2, %xmm2
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT: vpsllvw %ymm2, %ymm0, %ymm2
+; AVX512VLBW-NEXT: vpmovwb %ymm2, %xmm2
+; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %xmm1, %xmm3, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
-; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpsrlvw %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
@@ -1302,65 +1246,48 @@ define <16 x i8> @splatvar_rotate_v16i8(
;
; X32-SSE-LABEL: splatvar_rotate_v16i8:
; X32-SSE: # %bb.0:
+; X32-SSE-NEXT: movdqa %xmm0, %xmm2
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,3,4,5,6,7]
-; X32-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; X32-SSE-NEXT: psubb %xmm4, %xmm3
-; X32-SSE-NEXT: psllw $5, %xmm4
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: psllw $4, %xmm5
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5
-; X32-SSE-NEXT: pand %xmm1, %xmm5
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
-; X32-SSE-NEXT: por %xmm5, %xmm1
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm1, %xmm6
-; X32-SSE-NEXT: psllw $2, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pand %xmm5, %xmm1
-; X32-SSE-NEXT: por %xmm6, %xmm1
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm4
-; X32-SSE-NEXT: pandn %xmm1, %xmm4
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pand %xmm5, %xmm1
-; X32-SSE-NEXT: psllw $5, %xmm3
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
+; X32-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
+; X32-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; X32-SSE-NEXT: movdqa %xmm2, %xmm0
; X32-SSE-NEXT: psrlw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm2
; X32-SSE-NEXT: movdqa %xmm2, %xmm3
-; X32-SSE-NEXT: pandn %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
+; X32-SSE-NEXT: psllw $4, %xmm3
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT: por %xmm0, %xmm3
+; X32-SSE-NEXT: psllw $5, %xmm1
+; X32-SSE-NEXT: pxor %xmm0, %xmm0
+; X32-SSE-NEXT: pxor %xmm4, %xmm4
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm4
+; X32-SSE-NEXT: pand %xmm4, %xmm3
+; X32-SSE-NEXT: pandn %xmm2, %xmm4
+; X32-SSE-NEXT: por %xmm3, %xmm4
+; X32-SSE-NEXT: movdqa %xmm4, %xmm2
+; X32-SSE-NEXT: psrlw $6, %xmm2
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X32-SSE-NEXT: movdqa %xmm4, %xmm3
+; X32-SSE-NEXT: psllw $2, %xmm3
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X32-SSE-NEXT: por %xmm2, %xmm3
+; X32-SSE-NEXT: paddb %xmm1, %xmm1
+; X32-SSE-NEXT: pxor %xmm2, %xmm2
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2
+; X32-SSE-NEXT: pand %xmm2, %xmm3
+; X32-SSE-NEXT: pandn %xmm4, %xmm2
+; X32-SSE-NEXT: por %xmm3, %xmm2
+; X32-SSE-NEXT: movdqa %xmm2, %xmm3
+; X32-SSE-NEXT: paddb %xmm2, %xmm3
+; X32-SSE-NEXT: movdqa %xmm2, %xmm4
+; X32-SSE-NEXT: psrlw $7, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: por %xmm3, %xmm4
+; X32-SSE-NEXT: paddb %xmm1, %xmm1
+; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm0
+; X32-SSE-NEXT: pand %xmm0, %xmm4
+; X32-SSE-NEXT: pandn %xmm2, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
%splat = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
%splat8 = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %splat
@@ -1653,170 +1580,171 @@ define <8 x i16> @constant_rotate_v8i16(
define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_rotate_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: psllw $4, %xmm4
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $4, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psllw $4, %xmm5
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm5
+; SSE2-NEXT: por %xmm4, %xmm5
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pandn %xmm1, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psrlw $6, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm4
+; SSE2-NEXT: psllw $2, %xmm4
; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: por %xmm1, %xmm4
+; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm1
+; SSE2-NEXT: pandn %xmm3, %xmm1
; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: pandn %xmm1, %xmm5
-; SSE2-NEXT: psllw $2, %xmm1
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: por %xmm5, %xmm1
-; SSE2-NEXT: paddb %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm3
-; SSE2-NEXT: pandn %xmm1, %xmm3
-; SSE2-NEXT: paddb %xmm1, %xmm1
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [57600,41152,24704,8256,8192,24640,41088,57536]
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: psrlw $4, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm5
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: pandn %xmm0, %xmm6
-; SSE2-NEXT: psrlw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm5, %xmm0
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: paddb %xmm4, %xmm4
-; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psrlw $1, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: paddb %xmm1, %xmm3
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: psrlw $7, %xmm4
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm4
+; SSE2-NEXT: por %xmm3, %xmm4
+; SSE2-NEXT: paddb %xmm2, %xmm2
+; SSE2-NEXT: pcmpgtb %xmm2, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: pandn %xmm1, %xmm0
; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: por %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psllw $4, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: psrlw $4, %xmm0
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psllw $4, %xmm2
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT: por %xmm0, %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psllw $2, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm2, %xmm3
-; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlw $4, %xmm3
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [57600,41152,24704,8256,8192,24640,41088,57536]
-; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: psrlw $6, %xmm2
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlw $2, %xmm3
+; SSE41-NEXT: psllw $2, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: paddb %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlw $1, %xmm3
+; SSE41-NEXT: psrlw $7, %xmm3
; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
+; SSE41-NEXT: por %xmm2, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
-; SSE41-NEXT: por %xmm2, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: constant_rotate_v16i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpsllw $2, %xmm1, %xmm3
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $6, %xmm0, %xmm1
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsllw $2, %xmm0, %xmm3
; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
-; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm3
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vpaddb %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v16i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512F-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
-; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512F-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512F-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: vpsrlw $4, %xmm0, %xmm1
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512F-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vpsrlw $6, %xmm0, %xmm1
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512F-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT: vpor %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v16i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm1
-; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512VL-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: vpsrlw $4, %xmm0, %xmm1
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT: vpsllw $4, %xmm0, %xmm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-NEXT: vpor %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; AVX512VL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsrlw $6, %xmm0, %xmm1
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; AVX512VL-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-NEXT: vpor %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
-; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v16i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm1
+; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
-; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
-; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;
@@ -1827,60 +1755,45 @@ define <16 x i8> @constant_rotate_v16i8(
;
; X32-SSE-LABEL: constant_rotate_v16i8:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm1
-; X32-SSE-NEXT: movdqa %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $4, %xmm4
+; X32-SSE-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,57600,41152,24704,8256]
+; X32-SSE-NEXT: pxor %xmm0, %xmm0
+; X32-SSE-NEXT: pxor %xmm3, %xmm3
+; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
+; X32-SSE-NEXT: movdqa %xmm1, %xmm4
+; X32-SSE-NEXT: psrlw $4, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: movdqa %xmm1, %xmm5
+; X32-SSE-NEXT: psllw $4, %xmm5
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X32-SSE-NEXT: por %xmm4, %xmm5
+; X32-SSE-NEXT: pand %xmm3, %xmm5
+; X32-SSE-NEXT: pandn %xmm1, %xmm3
+; X32-SSE-NEXT: por %xmm5, %xmm3
+; X32-SSE-NEXT: movdqa %xmm3, %xmm1
+; X32-SSE-NEXT: psrlw $6, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: movdqa %xmm3, %xmm4
+; X32-SSE-NEXT: psllw $2, %xmm4
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: por %xmm1, %xmm4
+; X32-SSE-NEXT: paddb %xmm2, %xmm2
+; X32-SSE-NEXT: pxor %xmm1, %xmm1
+; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm1
; X32-SSE-NEXT: pand %xmm1, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm1
+; X32-SSE-NEXT: pandn %xmm3, %xmm1
; X32-SSE-NEXT: por %xmm4, %xmm1
-; X32-SSE-NEXT: paddb %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm5
-; X32-SSE-NEXT: pandn %xmm1, %xmm5
-; X32-SSE-NEXT: psllw $2, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pand %xmm4, %xmm1
-; X32-SSE-NEXT: por %xmm5, %xmm1
-; X32-SSE-NEXT: paddb %xmm3, %xmm3
-; X32-SSE-NEXT: pxor %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa %xmm4, %xmm3
-; X32-SSE-NEXT: pandn %xmm1, %xmm3
-; X32-SSE-NEXT: paddb %xmm1, %xmm1
-; X32-SSE-NEXT: pand %xmm4, %xmm1
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [57600,41152,24704,8256,8192,24640,41088,57536]
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pxor %xmm5, %xmm5
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm5
-; X32-SSE-NEXT: movdqa %xmm5, %xmm6
-; X32-SSE-NEXT: pandn %xmm0, %xmm6
-; X32-SSE-NEXT: psrlw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm5, %xmm0
-; X32-SSE-NEXT: por %xmm6, %xmm0
-; X32-SSE-NEXT: paddb %xmm4, %xmm4
-; X32-SSE-NEXT: pcmpgtb %xmm4, %xmm2
-; X32-SSE-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psrlw $1, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
+; X32-SSE-NEXT: movdqa %xmm1, %xmm3
+; X32-SSE-NEXT: paddb %xmm1, %xmm3
+; X32-SSE-NEXT: movdqa %xmm1, %xmm4
+; X32-SSE-NEXT: psrlw $7, %xmm4
+; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X32-SSE-NEXT: por %xmm3, %xmm4
+; X32-SSE-NEXT: paddb %xmm2, %xmm2
+; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm0
+; X32-SSE-NEXT: pand %xmm0, %xmm4
+; X32-SSE-NEXT: pandn %xmm1, %xmm0
; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: por %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
%lshr = lshr <16 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
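For readers following the new CHECK lines: a scalar C++ sketch of the sequence they encode (rotl8_by_parts is an illustrative name, not anything in the patch). Each pblendvb stage conditionally applies a fixed partial rotate of 4, 2 or 1 bits, keyed per byte off successive bits of the rotate amount, so the three stages compose into a rotate by any amount in [0,7]:

#include <cstdint>

// Scalar model of the psrlw/psllw/pand/por + pblendvb sequence above.
static uint8_t rotl8_by_parts(uint8_t x, uint8_t amt) {
  if (amt & 4) x = (uint8_t)((x << 4) | (x >> 4)); // first pblendvb stage
  if (amt & 2) x = (uint8_t)((x << 2) | (x >> 6)); // second stage
  if (amt & 1) x = (uint8_t)((x << 1) | (x >> 7)); // third stage (paddb == shl 1)
  return x;
}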
@@ -2057,29 +1970,29 @@ define <16 x i8> @splatconstant_rotate_v
; SSE-LABEL: splatconstant_rotate_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psllw $4, %xmm1
+; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: psrlw $4, %xmm0
+; SSE-NEXT: psllw $4, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_v16i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_v16i8:
@@ -2090,9 +2003,9 @@ define <16 x i8> @splatconstant_rotate_v
; X32-SSE-LABEL: splatconstant_rotate_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psllw $4, %xmm1
+; X32-SSE-NEXT: psrlw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: psllw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: retl
@@ -2286,36 +2199,32 @@ define <16 x i8> @splatconstant_rotate_m
; SSE-LABEL: splatconstant_rotate_mask_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psllw $4, %xmm1
+; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: psrlw $4, %xmm0
+; SSE-NEXT: psllw $4, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: splatconstant_rotate_mask_v16i8:
; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v16i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1
+; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
+; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
; XOP-LABEL: splatconstant_rotate_mask_v16i8:
@@ -2327,14 +2236,12 @@ define <16 x i8> @splatconstant_rotate_m
; X32-SSE-LABEL: splatconstant_rotate_mask_v16i8:
; X32-SSE: # %bb.0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE-NEXT: psllw $4, %xmm1
+; X32-SSE-NEXT: psrlw $4, %xmm1
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: psrlw $4, %xmm0
+; X32-SSE-NEXT: psllw $4, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: por %xmm1, %xmm0
; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: por %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
; X32-SSE-NEXT: retl
%shl = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%lshr = lshr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
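A scalar sketch of the 4-bit splat rotates above (rotl8_by4 is a hypothetical name): x86 has no vector byte-shift instructions, so each 16-bit lane is shifted and the bits dragged across byte boundaries are masked off, matching the psllw/psrlw + pand pairs in the CHECK lines:

#include <cstdint>

// Scalar model of the masked word-shift byte rotate.
static uint8_t rotl8_by4(uint8_t x) {
  uint8_t hi = (uint8_t)((x << 4) & 0xF0); // psllw $4, pand [240,240,...]
  uint8_t lo = (uint8_t)((x >> 4) & 0x0F); // psrlw $4, pand [15,15,...]
  return (uint8_t)(hi | lo);               // por
}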
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll?rev=335957&r1=335956&r2=335957&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll Fri Jun 29 02:36:39 2018
@@ -325,183 +325,154 @@ define <16 x i16> @var_rotate_v16i16(<16
define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1-LABEL: var_rotate_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpsubb %xmm4, %xmm3, %xmm9
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpsllw $4, %xmm5, %xmm6
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpsllw $5, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vpsllw $2, %xmm6, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm6, %xmm2
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
+; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6
-; AVX1-NEXT: vpand %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm3, %xmm3, %xmm4
+; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-NEXT: vpsrlw $4, %xmm5, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $5, %xmm9, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpsllw $5, %xmm8, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_rotate_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX2-NEXT: vpsubb %ymm1, %ymm2, %ymm2
-; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX2-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_rotate_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %ymm1, %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX512F-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %ymm1, %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX512VL-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v32i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512BW-NEXT: vpsubb %ymm1, %ymm2, %ymm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
-; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v32i8:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm2
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
-; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512VLBW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm3, %ymm1
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v32i8:
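As a scalar model of the AVX512BW lowering checked above (rotl8_widen is an illustrative name, not from the patch): each byte is zero-extended to a word, shifted left by the amount and right by (8 - amount), truncated back, and the two halves are ORed:

#include <cstdint>

// Scalar model of the vpmovzxbw + vpsllvw/vpsrlvw + vpmovwb + vpor path.
static uint8_t rotl8_widen(uint8_t x, unsigned amt) { // amt in [0,7]
  uint16_t w = x;                         // vpmovzxbw
  uint8_t hi = (uint8_t)(w << amt);       // vpsllvw, then vpmovwb
  uint8_t lo = (uint8_t)(w >> (8 - amt)); // vpsrlvw (amount from vpsubb), vpmovwb
  return (uint8_t)(hi | lo);              // vpor
}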
@@ -805,180 +776,155 @@ define <32 x i8> @splatvar_rotate_v32i8(
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm1, %xmm4, %xmm3, %xmm4
-; AVX1-NEXT: vpsllw $2, %xmm4, %xmm6
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm6
-; AVX1-NEXT: vpand %xmm9, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm1, %xmm6, %xmm0, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm6
-; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm2, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm10
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm9, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $5, %xmm8, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm11, %xmm7, %xmm7
+; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm7
+; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm5
+; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm1
+; AVX1-NEXT: vpand %xmm10, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpblendvb %xmm7, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm1
+; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpblendvb %xmm5, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm10, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_rotate_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX2-NEXT: vpsubb %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX2-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX2-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX2-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: splatvar_rotate_v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %ymm1, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX512F-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v32i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm3
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm3
-; AVX512VL-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm4
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512VL-NEXT: vpblendvb %ymm1, %ymm4, %ymm3, %ymm1
-; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm3
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512BW-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm3, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
-; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v32i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm2, %ymm2
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
+; AVX512VLBW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
+; AVX512VLBW-NEXT: vpsubb %ymm1, %ymm3, %ymm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
-; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
-; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v32i8:
@@ -1249,158 +1195,141 @@ define <32 x i8> @constant_rotate_v32i8(
; AVX1-LABEL: constant_rotate_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm2
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $6, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX1-NEXT: vpand %xmm10, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm1, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX1-NEXT: vpand %xmm11, %xmm7, %xmm7
+; AVX1-NEXT: vpor %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm5
-; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm3
-; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
-; AVX1-NEXT: vpand %xmm8, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm0, %xmm4
-; AVX1-NEXT: vpsllw $2, %xmm4, %xmm5
-; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm5
-; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm4, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm9
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm2
-; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm7, %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpblendvb %xmm2, %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm6
+; AVX1-NEXT: vpor %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vpaddb %xmm7, %xmm7, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX1-NEXT: vpand %xmm8, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm2
+; AVX1-NEXT: vpand %xmm10, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm2
+; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX1-NEXT: vpor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm9, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
-; AVX2-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
-; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm1
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm1
-; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm1
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm1
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v32i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v32i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
-; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v32i8:
@@ -1612,37 +1541,37 @@ define <32 x i8> @splatconstant_rotate_v
; AVX1-LABEL: splatconstant_rotate_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_v32i8:
@@ -1869,43 +1798,40 @@ define <32 x i8> @splatconstant_rotate_m
; AVX1-LABEL: splatconstant_rotate_mask_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm2, %ymm1
-; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatconstant_rotate_mask_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: splatconstant_rotate_mask_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $4, %ymm0, %ymm1
+; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm1
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512-NEXT: retq
;
; XOPAVX1-LABEL: splatconstant_rotate_mask_v32i8:
Modified: llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll?rev=335957&r1=335956&r2=335957&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-rotate-512.ll Fri Jun 29 02:36:39 2018
@@ -106,122 +106,100 @@ define <32 x i16> @var_rotate_v32i16(<32
define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: var_rotate_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %ymm2, %ymm5, %ymm4
-; AVX512F-NEXT: vpsubb %ymm3, %ymm5, %ymm5
-; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm6
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm6
-; AVX512F-NEXT: vpsllw $2, %ymm6, %ymm8
+; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm6
-; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm3
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm6
-; AVX512F-NEXT: vpsllw $2, %ymm6, %ymm7
-; AVX512F-NEXT: vpand %ymm9, %ymm7, %ymm7
+; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm6
-; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm2
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512F-NEXT: vpsllw $5, %ymm5, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm8, %ymm6, %ymm6
-; AVX512F-NEXT: vpaddb %ymm5, %ymm5, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm9, %ymm6, %ymm6
-; AVX512F-NEXT: vpaddb %ymm5, %ymm5, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm3
-; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
-; AVX512F-NEXT: vpsllw $5, %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm3
-; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm3
-; AVX512F-NEXT: vpand %ymm9, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
+; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
+; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %ymm2, %ymm5, %ymm4
-; AVX512VL-NEXT: vpsubb %ymm3, %ymm5, %ymm5
-; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm6
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm6, %ymm1, %ymm6
-; AVX512VL-NEXT: vpsllw $2, %ymm6, %ymm8
+; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm6
-; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm8, %ymm6, %ymm3
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm6, %ymm0, %ymm6
-; AVX512VL-NEXT: vpsllw $2, %ymm6, %ymm7
-; AVX512VL-NEXT: vpand %ymm9, %ymm7, %ymm7
+; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm6
-; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm7
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
-; AVX512VL-NEXT: vpblendvb %ymm2, %ymm7, %ymm6, %ymm2
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512VL-NEXT: vpsllw $5, %ymm5, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512VL-NEXT: vpand %ymm8, %ymm6, %ymm6
-; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-NEXT: vpand %ymm9, %ymm6, %ymm6
-; AVX512VL-NEXT: vpaddb %ymm5, %ymm5, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm5, %ymm6, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm5
-; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
-; AVX512VL-NEXT: vpsllw $5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm5
-; AVX512VL-NEXT: vpand %ymm8, %ymm5, %ymm5
-; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm5
-; AVX512VL-NEXT: vpand %ymm9, %ymm5, %ymm5
-; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm4, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
-; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
+; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
+; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v64i8:
@@ -396,109 +374,95 @@ define <64 x i8> @splatvar_rotate_v64i8(
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512F-NEXT: vpsubb %ymm2, %ymm3, %ymm3
-; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm1, %ymm4
-; AVX512F-NEXT: vpsllw $2, %ymm4, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
-; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm8
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm6
-; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm9
-; AVX512F-NEXT: vpblendvb %ymm9, %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm5, %ymm0, %ymm2
-; AVX512F-NEXT: vpsllw $2, %ymm2, %ymm5
-; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm5
-; AVX512F-NEXT: vpblendvb %ymm9, %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm3
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm5
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm8
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm9, %ymm5, %ymm5
-; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm10
-; AVX512F-NEXT: vpblendvb %ymm10, %ymm5, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm1, %ymm4, %ymm1
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm3, %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm3
-; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm3
+; AVX512F-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm3
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpand %ymm8, %ymm7, %ymm7
+; AVX512F-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm7
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512F-NEXT: vpor %ymm3, %ymm10, %ymm3
+; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm3
+; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
+; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
-; AVX512VL-NEXT: vpsubb %ymm2, %ymm3, %ymm3
-; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm6
-; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm9
-; AVX512VL-NEXT: vpblendvb %ymm9, %ymm4, %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpsllw $2, %ymm4, %ymm5
-; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm9, %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
-; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm5
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512VL-NEXT: vpand %ymm7, %ymm5, %ymm5
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm8
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm5
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-NEXT: vpand %ymm9, %ymm5, %ymm5
-; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm10
-; AVX512VL-NEXT: vpblendvb %ymm10, %ymm5, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm5
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm3
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm5
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
-; AVX512VL-NEXT: vpblendvb %ymm3, %ymm5, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm3
-; AVX512VL-NEXT: vpand %ymm7, %ymm3, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm3
+; AVX512VL-NEXT: vpor %ymm3, %ymm5, %ymm3
+; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm3
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpand %ymm8, %ymm7, %ymm7
+; AVX512VL-NEXT: vpor %ymm3, %ymm7, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm7
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm3
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm9, %ymm3, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512VL-NEXT: vpor %ymm3, %ymm10, %ymm3
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512VL-NEXT: vpblendvb %ymm10, %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm4, %ymm0
-; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm3
+; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
+; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatvar_rotate_v64i8:
@@ -651,104 +615,94 @@ define <32 x i16> @constant_rotate_v32i1
define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
-; AVX512F-NEXT: vpsllw $2, %ymm2, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpand %ymm8, %ymm7, %ymm7
+; AVX512F-NEXT: vpor %ymm2, %ymm7, %ymm2
; AVX512F-NEXT: vpaddb %ymm4, %ymm4, %ymm7
-; AVX512F-NEXT: vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm5
-; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm8
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm5
-; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
-; AVX512F-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm3
-; AVX512F-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm4
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX512F-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $2, %ymm1, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm8, %ymm8, %ymm10
-; AVX512F-NEXT: vpblendvb %ymm10, %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512F-NEXT: vpor %ymm2, %ymm10, %ymm2
+; AVX512F-NEXT: vpaddb %ymm7, %ymm7, %ymm10
; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256]
-; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm2
-; AVX512VL-NEXT: vpsllw $2, %ymm2, %ymm5
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpand %ymm8, %ymm7, %ymm7
+; AVX512VL-NEXT: vpor %ymm2, %ymm7, %ymm2
; AVX512VL-NEXT: vpaddb %ymm4, %ymm4, %ymm7
-; AVX512VL-NEXT: vpblendvb %ymm7, %ymm5, %ymm2, %ymm2
-; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm5
-; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm8
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm5, %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm5
-; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
-; AVX512VL-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm3
-; AVX512VL-NEXT: vpsllw $2, %ymm3, %ymm4
-; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [57600,41152,24704,8256,8192,24640,41088,57536,57600,41152,24704,8256,8192,24640,41088,57536]
-; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $2, %ymm1, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm6, %ymm6, %ymm8
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm8, %ymm8, %ymm10
-; AVX512VL-NEXT: vpblendvb %ymm10, %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $2, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm8, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $1, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
-; AVX512VL-NEXT: vpblendvb %ymm10, %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
+; AVX512VL-NEXT: vpor %ymm2, %ymm10, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm7, %ymm7, %ymm10
+; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
+; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm3
+; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
+; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm3
+; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpblendvb %ymm10, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v64i8:
@@ -891,34 +845,34 @@ define <32 x i16> @splatconstant_rotate_
define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
+; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_v64i8:
@@ -1031,38 +985,40 @@ define <32 x i16> @splatconstant_rotate_
define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm3
-; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
+; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm3
-; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55]
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33]
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm4, %ymm4
-; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
-; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
+; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
+; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v64i8: