[llvm] r327242 - [X86][AVX] createVariablePermute - use PSHUFB+PCMPGT+SELECT for v32i8 variable permutes
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Mar 11 09:28:12 PDT 2018
Author: rksimon
Date: Sun Mar 11 09:28:11 2018
New Revision: 327242
URL: http://llvm.org/viewvc/llvm-project?rev=327242&view=rev
Log:
[X86][AVX] createVariablePermute - use PSHUFB+PCMPGT+SELECT for v32i8 variable permutes
Same as the VPERMILPS/VPERMILPD approach for the v8f32/v4f64 cases, rely on PSHUFB using bits[3:0] for indexing - we can ignore the sign bit (which would zero the element) as those index vector values are considered undefined. Then select between the lo/hi permute results based on the index value.
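For illustration, the AVX1 form of this lowering is equivalent to the following intrinsics sketch (a hypothetical helper, not code from this patch; indices outside [0,31] yield undefined results):

#include <immintrin.h>

// Sketch only: permute both 128-bit halves of Src with PSHUFB, then
// select per byte on whether the index addresses the upper half.
__m256i var_shuffle_v32i8_sketch(__m256i Src, __m256i Idx) {
  __m128i Lo = _mm256_castsi256_si128(Src);
  __m128i Hi = _mm256_extractf128_si256(Src, 1);
  __m128i IdxLo = _mm256_castsi256_si128(Idx);
  __m128i IdxHi = _mm256_extractf128_si256(Idx, 1);
  __m128i Fifteen = _mm_set1_epi8(15);
  // PSHUFB only reads bits[3:0] of each index byte, so the same index
  // vector can permute both halves; bytes with index > 15 take the Hi
  // result via the signed compare mask.
  __m128i ResLo = _mm_blendv_epi8(_mm_shuffle_epi8(Lo, IdxLo),
                                  _mm_shuffle_epi8(Hi, IdxLo),
                                  _mm_cmpgt_epi8(IdxLo, Fifteen));
  __m128i ResHi = _mm_blendv_epi8(_mm_shuffle_epi8(Lo, IdxHi),
                                  _mm_shuffle_epi8(Hi, IdxHi),
                                  _mm_cmpgt_epi8(IdxHi, Fifteen));
  return _mm256_insertf128_si256(_mm256_castsi128_si256(ResLo), ResHi, 1);
}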
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/var-permute-256.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=327242&r1=327241&r2=327242&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Mar 11 09:28:11 2018
@@ -8027,6 +8027,26 @@ SDValue createVariablePermute(MVT VT, SD
ISD::CONCAT_VECTORS, DL, VT,
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
+ } else if (Subtarget.hasAVX()) {
+ SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
+ SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
+ SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
+ SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
+ auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
+ ArrayRef<SDValue> Ops) {
+ // Permute Lo and Hi and then select based on index range.
+      // This works as PSHUFB uses bits[3:0] to permute elements and we don't
+      // care about bit[7] as it's just an index vector.
+ SDValue Idx = Ops[2];
+ EVT VT = Idx.getValueType();
+ return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
+ DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
+ DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
+ ISD::CondCode::SETGT);
+ };
+ SDValue Ops[] = {LoLo, HiHi, IndicesVec};
+ return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
+ PSHUFBBuilder);
}
break;
case MVT::v16i16:
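On AVX1 the getSelectCC lowers to a VPCMPGTB + VPBLENDVB pair per 128-bit half (via SplitOpsAndApply), while AVX2 and later keep a single 256-bit compare and blend, as the updated test checks below show. A hypothetical AVX2-level sketch of the same select (again, not code from this patch):

#include <immintrin.h>

// Each 128-bit lane of a 256-bit PSHUFB permutes within itself, so
// splatting Lo/Hi across both lanes lets one shuffle serve the whole
// index vector, followed by a single VPCMPGTB + VPBLENDVB select.
__m256i var_shuffle_v32i8_avx2_sketch(__m256i Src, __m256i Idx) {
  __m128i Lo = _mm256_castsi256_si128(Src);
  __m128i Hi = _mm256_extracti128_si256(Src, 1);
  __m256i LoLo = _mm256_inserti128_si256(_mm256_castsi128_si256(Lo), Lo, 1);
  __m256i HiHi = _mm256_inserti128_si256(_mm256_castsi128_si256(Hi), Hi, 1);
  __m256i Mask = _mm256_cmpgt_epi8(Idx, _mm256_set1_epi8(15));
  return _mm256_blendv_epi8(_mm256_shuffle_epi8(LoLo, Idx),
                            _mm256_shuffle_epi8(HiHi, Idx), Mask);
}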
Modified: llvm/trunk/test/CodeGen/X86/var-permute-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/var-permute-256.ll?rev=327242&r1=327241&r2=327242&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/var-permute-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/var-permute-256.ll Sun Mar 11 09:28:11 2018
@@ -541,637 +541,62 @@ define <32 x i8> @var_shuffle_v32i8(<32
;
; AVX1-LABEL: var_shuffle_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: pushq %rbp
-; AVX1-NEXT: movq %rsp, %rbp
-; AVX1-NEXT: andq $-32, %rsp
-; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpextrb $0, %xmm2, %eax
-; AVX1-NEXT: vmovaps %ymm0, (%rsp)
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpextrb $1, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $2, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $3, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $4, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $5, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $6, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $7, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $8, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $9, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $10, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $11, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $12, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $13, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $14, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $15, %xmm2, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vpextrb $1, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $1, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $2, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $2, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $3, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $3, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $4, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $4, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $5, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $5, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $6, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $6, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $7, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $7, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $8, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $8, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $9, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $9, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $10, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $10, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $11, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $11, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $12, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $12, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $13, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $13, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $14, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: vpinsrb $14, (%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $15, %xmm1, %eax
-; AVX1-NEXT: andl $31, %eax
-; AVX1-NEXT: movzbl (%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: movq %rbp, %rsp
-; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpshufb %xmm2, %xmm5, %xmm6
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm4
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: pushq %rbp
-; AVX2-NEXT: movq %rsp, %rbp
-; AVX2-NEXT: andq $-32, %rsp
-; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpextrb $0, %xmm2, %eax
-; AVX2-NEXT: vmovaps %ymm0, (%rsp)
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpextrb $1, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $2, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $3, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $4, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $5, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $6, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $7, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $8, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $9, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $10, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $11, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $12, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $13, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $14, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $15, %xmm2, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $1, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $2, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $3, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $4, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $5, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $5, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $6, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $7, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $7, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $8, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $9, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $9, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $10, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $11, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $11, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $12, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $13, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $13, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: vpinsrb $14, (%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $15, %xmm1, %eax
-; AVX2-NEXT: andl $31, %eax
-; AVX2-NEXT: movzbl (%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-NEXT: movq %rbp, %rsp
-; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX2-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: pushq %rbp
-; AVX512F-NEXT: movq %rsp, %rbp
-; AVX512F-NEXT: andq $-32, %rsp
-; AVX512F-NEXT: subq $64, %rsp
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $1, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $2, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $3, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $4, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $5, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $6, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $7, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $8, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $9, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $10, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $11, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $12, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $13, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: vpinsrb $14, (%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512F-NEXT: andl $31, %eax
-; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512F-NEXT: movq %rbp, %rsp
-; AVX512F-NEXT: popq %rbp
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512F-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: var_shuffle_v32i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: pushq %rbp
-; AVX512DQ-NEXT: movq %rsp, %rbp
-; AVX512DQ-NEXT: andq $-32, %rsp
-; AVX512DQ-NEXT: subq $64, %rsp
-; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512DQ-NEXT: vmovaps %ymm0, (%rsp)
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vmovd %eax, %xmm0
-; AVX512DQ-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vmovd %eax, %xmm2
-; AVX512DQ-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $1, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $2, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $3, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $4, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $5, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $6, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $7, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $8, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $9, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $10, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $11, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $12, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $13, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: vpinsrb $14, (%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512DQ-NEXT: andl $31, %eax
-; AVX512DQ-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512DQ-NEXT: movq %rbp, %rsp
-; AVX512DQ-NEXT: popq %rbp
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: pushq %rbp
-; AVX512VL-NEXT: movq %rsp, %rbp
-; AVX512VL-NEXT: andq $-32, %rsp
-; AVX512VL-NEXT: subq $64, %rsp
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512VL-NEXT: vmovaps %ymm0, (%rsp)
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $1, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $2, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $3, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $4, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $5, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $6, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $7, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $8, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $9, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $10, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $11, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $12, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $13, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: vpinsrb $14, (%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512VL-NEXT: andl $31, %eax
-; AVX512VL-NEXT: movzbl (%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512VL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512VL-NEXT: movq %rbp, %rsp
-; AVX512VL-NEXT: popq %rbp
+; AVX512VL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm2, %ymm2
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v32i8:
@@ -1928,606 +1353,56 @@ define <32 x i8> @var_shuffle_v32i8_from
; AVX1-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vpextrb $0, %xmm2, %eax
-; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vpextrb $1, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $2, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $3, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $4, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $5, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $6, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $7, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $8, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $9, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $10, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $11, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $12, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $13, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $14, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $15, %xmm2, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX1-NEXT: vpextrb $0, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vpextrb $1, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $1, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $2, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $2, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $3, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $3, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $4, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $4, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $5, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $5, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $6, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $7, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $8, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $9, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $10, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $11, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $12, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $13, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $14, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $15, %xmm1, %eax
-; AVX1-NEXT: andl $15, %eax
-; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm5
+; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm3
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm4
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpblendvb %xmm3, %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpextrb $0, %xmm2, %eax
-; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vpextrb $1, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $2, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $3, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $4, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $5, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $6, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $7, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $8, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $9, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $10, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $11, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $12, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $13, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $14, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $15, %xmm2, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX2-NEXT: vpextrb $0, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $1, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $2, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $3, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $4, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $4, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $5, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $5, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $6, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $7, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $8, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $9, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $10, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $11, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $12, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $13, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $14, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $15, %xmm1, %eax
-; AVX2-NEXT: andl $15, %eax
-; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512F-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512F-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $1, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $2, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $3, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $4, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $5, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512F-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512F-NEXT: andl $15, %eax
-; AVX512F-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512F-NEXT: vpshufb %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512DQ-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512DQ-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vmovd %eax, %xmm0
-; AVX512DQ-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512DQ-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vmovd %eax, %xmm2
-; AVX512DQ-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $1, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $2, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $3, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $4, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $5, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512DQ-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512DQ-NEXT: andl $15, %eax
-; AVX512DQ-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_v32i8_from_v16i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vpextrb $1, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $2, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $3, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $4, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $5, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $6, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $7, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $9, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $10, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $11, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $12, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $13, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $14, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $15, %xmm2, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512VL-NEXT: vpextrb $0, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vpextrb $1, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $1, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $2, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $2, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $3, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $3, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $4, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $4, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $5, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $5, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $6, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $6, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $7, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $7, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $8, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $9, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $9, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $10, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $10, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $11, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $11, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $12, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $12, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $13, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $13, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $14, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: vpinsrb $14, -24(%rsp,%rax), %xmm2, %xmm2
-; AVX512VL-NEXT: vpextrb $15, %xmm1, %eax
-; AVX512VL-NEXT: andl $15, %eax
-; AVX512VL-NEXT: movzbl -24(%rsp,%rax), %eax
-; AVX512VL-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
-; AVX512VL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512VL-NEXT: vpshufb %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; VBMI-LABEL: var_shuffle_v32i8_from_v16i8: