[llvm] 88eae6e - [DAGCombine] Expand usage of CreateBuildVecShuffle to make full use of vector ops

Phoebe Wang via llvm-commits llvm-commits@lists.llvm.org
Sun Jan 22 19:45:46 PST 2023


Author: Wang, Xin10
Date: 2023-01-23T11:45:38+08:00
New Revision: 88eae6ef9fb527bfd979c1672e2ed07f77136fbd

URL: https://github.com/llvm/llvm-project/commit/88eae6ef9fb527bfd979c1672e2ed07f77136fbd
DIFF: https://github.com/llvm/llvm-project/commit/88eae6ef9fb527bfd979c1672e2ed07f77136fbd.diff

LOG: [DAGCombine] Expand usage of CreateBuildVecShuffle to make full use of vector ops

When llc encounters a BUILD_VECTOR made up of many extract_vector_elt
nodes, it replaces the pattern with a vector_shuffle to reduce code
size; this is done in createBuildVecShuffle in DAGCombiner.cpp.
Previously, however, that code could not handle the case where the
source vector register is more than twice as wide as the destination.

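For illustration, here is a minimal IR sketch of the newly handled
pattern (hypothetical code, not taken from this patch's tests, and
assuming the wide type is legal on the target, e.g. v16f32 with
AVX-512): a <4 x float> built out of extracts from two <16 x float>
sources, i.e. inputs four times as wide as the result.

  ; hypothetical example: a BUILD_VECTOR of extracts from wide sources
  define <4 x float> @gather(<16 x float> %a, <16 x float> %b) {
    %e0 = extractelement <16 x float> %a, i32 4
    %e1 = extractelement <16 x float> %b, i32 5
    %e2 = extractelement <16 x float> %a, i32 1
    %e3 = extractelement <16 x float> %b, i32 1
    %v0 = insertelement <4 x float> poison, float %e0, i32 0
    %v1 = insertelement <4 x float> %v0, float %e1, i32 1
    %v2 = insertelement <4 x float> %v1, float %e2, i32 2
    %v3 = insertelement <4 x float> %v2, float %e3, i32 3
    ret <4 x float> %v3
  }

With InVT1Size / VTSize == 4 and NumElems == 4, the combine now forms
the shuffle at the input width (ShuffleNumElems = 4 * 4 = 16) and
extracts the <4 x float> result as a subvector, instead of giving up
and falling back to scalar extracts and inserts.
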
Reviewed By: pengfei

Differential Revision: https://reviews.llvm.org/D139685

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/X86/avg.ll
    llvm/test/CodeGen/X86/pr29112.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 120515fd0a2c3..42f6221e402fb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -21180,6 +21180,29 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
       SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
       ConcatOps[0] = VecIn2;
       VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
+    } else if (InVT1Size / VTSize > 1 && InVT1Size % VTSize == 0) {
+      if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems) ||
+          !TLI.isTypeLegal(InVT1) || !TLI.isTypeLegal(InVT2))
+        return SDValue();
+      // If the dest vector has two or fewer elements, then a shuffle plus an
+      // extract from the larger registers would cost even more.
+      if (VT.getVectorNumElements() <= 2 || !VecIn2.getNode())
+        return SDValue();
+      assert(InVT2Size <= InVT1Size &&
+             "Second input is not going to be larger than the first one.");
+
+      // VecIn1 is wider than the output, and we have another, possibly
+      // smaller input. Pad the smaller input with undefs, shuffle at the
+      // input vector width, and extract the output.
+      // The shuffle type is different than VT, so check legality again.
+      if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
+        return SDValue();
+
+      if (InVT1 != InVT2) {
+        VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
+                             DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
+      }
+      ShuffleNumElems = InVT1Size / VTSize * NumElems;
     } else {
       // TODO: Support cases where the length mismatch isn't exactly by a
       // factor of 2.

diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 216c2226b384c..72c9948c2f591 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -2253,257 +2253,161 @@ define void @not_avg_v16i8_wide_constants(ptr %a, ptr %b) nounwind {
 ; AVX512-NEXT:    pushq %r13
 ; AVX512-NEXT:    pushq %r12
 ; AVX512-NEXT:    pushq %rbx
-; AVX512-NEXT:    subq $24, %rsp
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm4, %xmm0
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512-NEXT:    vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT:    vpextrq $1, %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT:    vpextrq $1, %xmm4, %rbp
+; AVX512-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
 ; AVX512-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512-NEXT:    vmovq %xmm4, %r13
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512-NEXT:    vmovq %xmm4, %rdi
+; AVX512-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %r8
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512-NEXT:    vmovq %xmm3, %r9
+; AVX512-NEXT:    vpextrq $1, %xmm3, %r10
+; AVX512-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %r11
+; AVX512-NEXT:    vpextrq $1, %xmm3, %rbx
+; AVX512-NEXT:    vpextrq $1, %xmm5, %rax
 ; AVX512-NEXT:    vpextrq $1, %xmm4, %r12
-; AVX512-NEXT:    vmovq %xmm3, %r15
-; AVX512-NEXT:    vpextrq $1, %xmm3, %r14
+; AVX512-NEXT:    vpextrq $1, %xmm1, %r15
+; AVX512-NEXT:    vpextrq $1, %xmm0, %r14
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm5, %xmm3
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-NEXT:    vmovq %xmm2, %rbx
+; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512-NEXT:    vpextrq $1, %xmm8, %rsi
+; AVX512-NEXT:    addq %rax, %rsi
+; AVX512-NEXT:    vpextrq $1, %xmm7, %rdx
+; AVX512-NEXT:    addq %r12, %rdx
+; AVX512-NEXT:    vpextrq $1, %xmm4, %rcx
+; AVX512-NEXT:    addq %r15, %rcx
+; AVX512-NEXT:    vpextrq $1, %xmm3, %rax
+; AVX512-NEXT:    addq %r14, %rax
+; AVX512-NEXT:    vpextrq $1, %xmm9, %r14
+; AVX512-NEXT:    leaq -1(%rbx,%r14), %r13
+; AVX512-NEXT:    vmovq %xmm9, %rbx
+; AVX512-NEXT:    leaq -1(%r11,%rbx), %r12
 ; AVX512-NEXT:    vpextrq $1, %xmm2, %r11
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512-NEXT:    leaq -1(%r10,%r11), %r15
 ; AVX512-NEXT:    vmovq %xmm2, %r10
-; AVX512-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512-NEXT:    vmovq %xmm2, %rdi
-; AVX512-NEXT:    vpextrq $1, %xmm2, %r8
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT:    vmovq %xmm2, %rsi
-; AVX512-NEXT:    vpextrq $1, %xmm2, %rdx
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512-NEXT:    vmovq %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    vpextrq $1, %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512-NEXT:    vmovq %xmm4, %rbp
-; AVX512-NEXT:    addq %r13, %rbp
-; AVX512-NEXT:    vpextrq $1, %xmm4, %r13
-; AVX512-NEXT:    addq %r12, %r13
-; AVX512-NEXT:    vmovq %xmm3, %rcx
-; AVX512-NEXT:    addq %r15, %rcx
-; AVX512-NEXT:    vpextrq $1, %xmm3, %r9
-; AVX512-NEXT:    addq %r14, %r9
-; AVX512-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512-NEXT:    vmovq %xmm2, %r9
-; AVX512-NEXT:    addq %rbx, %r9
-; AVX512-NEXT:    vpextrq $1, %xmm2, %rbx
-; AVX512-NEXT:    addq %r11, %rbx
-; AVX512-NEXT:    movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT:    vmovq %xmm2, %r11
-; AVX512-NEXT:    addq %r10, %r11
-; AVX512-NEXT:    movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vpextrq $1, %xmm2, %r10
-; AVX512-NEXT:    addq %rax, %r10
-; AVX512-NEXT:    movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT:    vmovq %xmm2, %rax
-; AVX512-NEXT:    addq %rdi, %rax
-; AVX512-NEXT:    movq %rax, %r12
-; AVX512-NEXT:    vpextrq $1, %xmm2, %rax
-; AVX512-NEXT:    addq %r8, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512-NEXT:    vmovq %xmm2, %rax
-; AVX512-NEXT:    addq %rsi, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    vpextrq $1, %xmm2, %r15
-; AVX512-NEXT:    addq %rdx, %r15
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT:    vmovq %xmm0, %r10
-; AVX512-NEXT:    addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
-; AVX512-NEXT:    vpextrq $1, %xmm0, %r8
-; AVX512-NEXT:    addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
-; AVX512-NEXT:    vmovq %xmm1, %rax
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT:    leaq -1(%r9,%r10), %r14
+; AVX512-NEXT:    vmovq %xmm8, %r9
+; AVX512-NEXT:    leaq -1(%r8,%r9), %r11
+; AVX512-NEXT:    vmovq %xmm7, %r8
+; AVX512-NEXT:    leaq -1(%rdi,%r8), %r10
+; AVX512-NEXT:    vpextrq $1, %xmm6, %rdi
+; AVX512-NEXT:    leaq -1(%rbp,%rdi), %r9
+; AVX512-NEXT:    vmovq %xmm6, %rdi
+; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT:    leaq -1(%r8,%rdi), %rdi
+; AVX512-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT:    vpextrq $1, %xmm5, %rdi
+; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT:    leaq -1(%r8,%rdi), %rdi
+; AVX512-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT:    vmovq %xmm5, %rdi
+; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; AVX512-NEXT:    leaq -1(%r8,%rdi), %rdi
+; AVX512-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT:    vmovq %xmm1, %rdi
+; AVX512-NEXT:    vmovq %xmm4, %r8
+; AVX512-NEXT:    leaq -1(%rdi,%r8), %rdi
+; AVX512-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; AVX512-NEXT:    vmovq %xmm0, %rdi
-; AVX512-NEXT:    addq %rax, %rdi
-; AVX512-NEXT:    vpextrq $1, %xmm1, %rsi
-; AVX512-NEXT:    vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT:    addq %rsi, %rdx
-; AVX512-NEXT:    addq $-1, %rbp
-; AVX512-NEXT:    movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    movl $0, %r14d
-; AVX512-NEXT:    adcq $-1, %r14
-; AVX512-NEXT:    addq $-1, %r13
-; AVX512-NEXT:    movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    movl $0, %ebx
-; AVX512-NEXT:    adcq $-1, %rbx
-; AVX512-NEXT:    addq $-1, %rcx
-; AVX512-NEXT:    movq %rcx, (%rsp) # 8-byte Spill
-; AVX512-NEXT:    movl $0, %esi
-; AVX512-NEXT:    adcq $-1, %rsi
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %r11d
-; AVX512-NEXT:    adcq $-1, %r11
-; AVX512-NEXT:    addq $-1, %r9
-; AVX512-NEXT:    movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    movl $0, %r9d
-; AVX512-NEXT:    adcq $-1, %r9
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, %r12
-; AVX512-NEXT:    movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT:    vmovq %xmm3, %r8
+; AVX512-NEXT:    leaq -1(%rdi,%r8), %rdi
+; AVX512-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT:    xorl %r8d, %r8d
+; AVX512-NEXT:    addq $-1, %rsi
+; AVX512-NEXT:    movl $0, %edi
+; AVX512-NEXT:    adcq $-1, %rdi
+; AVX512-NEXT:    addq $-1, %rdx
 ; AVX512-NEXT:    movl $0, %ebp
 ; AVX512-NEXT:    adcq $-1, %rbp
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, %r15
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    addq $-1, %r10
-; AVX512-NEXT:    movl $0, %r12d
-; AVX512-NEXT:    adcq $-1, %r12
-; AVX512-NEXT:    addq $-1, %r8
-; AVX512-NEXT:    movl $0, %ecx
-; AVX512-NEXT:    adcq $-1, %rcx
-; AVX512-NEXT:    addq $-1, %rdi
-; AVX512-NEXT:    movl $0, %eax
-; AVX512-NEXT:    adcq $-1, %rax
-; AVX512-NEXT:    addq $-1, %rdx
-; AVX512-NEXT:    movl $0, %r13d
-; AVX512-NEXT:    adcq $-1, %r13
-; AVX512-NEXT:    shldq $63, %rdx, %r13
-; AVX512-NEXT:    movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    shldq $63, %rdi, %rax
-; AVX512-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; AVX512-NEXT:    shldq $63, %r8, %rcx
-; AVX512-NEXT:    movq %rcx, %r13
-; AVX512-NEXT:    shldq $63, %r10, %r12
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %r15, %r8
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rdi
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %r10
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rbp
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rdx
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %r15
+; AVX512-NEXT:    addq $-1, %rcx
+; AVX512-NEXT:    movl $0, %ebx
+; AVX512-NEXT:    adcq $-1, %rbx
+; AVX512-NEXT:    addq $-1, %rax
+; AVX512-NEXT:    adcq $-1, %r8
+; AVX512-NEXT:    shldq $63, %rax, %r8
+; AVX512-NEXT:    shldq $63, %rcx, %rbx
+; AVX512-NEXT:    shldq $63, %rdx, %rbp
+; AVX512-NEXT:    shldq $63, %rsi, %rdi
+; AVX512-NEXT:    shrq %r13
+; AVX512-NEXT:    vmovq %r13, %xmm0
+; AVX512-NEXT:    shrq %r12
+; AVX512-NEXT:    vmovq %r12, %xmm1
+; AVX512-NEXT:    shrq %r15
+; AVX512-NEXT:    vmovq %r15, %xmm2
+; AVX512-NEXT:    shrq %r14
+; AVX512-NEXT:    vmovq %r14, %xmm3
+; AVX512-NEXT:    vmovq %rdi, %xmm4
+; AVX512-NEXT:    shrq %r11
+; AVX512-NEXT:    vmovq %r11, %xmm5
+; AVX512-NEXT:    vmovq %rbp, %xmm6
+; AVX512-NEXT:    shrq %r10
+; AVX512-NEXT:    vmovq %r10, %xmm7
+; AVX512-NEXT:    shrq %r9
+; AVX512-NEXT:    vmovq %r9, %xmm8
 ; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rcx
+; AVX512-NEXT:    shrq %rax
+; AVX512-NEXT:    vmovq %rax, %xmm9
 ; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %r9
+; AVX512-NEXT:    shrq %rax
+; AVX512-NEXT:    vmovq %rax, %xmm10
 ; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %r11
-; AVX512-NEXT:    movq (%rsp), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rsi
+; AVX512-NEXT:    shrq %rax
+; AVX512-NEXT:    vmovq %rax, %xmm11
+; AVX512-NEXT:    vmovq %rbx, %xmm12
 ; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %rbx
+; AVX512-NEXT:    shrq %rax
+; AVX512-NEXT:    vmovq %rax, %xmm13
+; AVX512-NEXT:    vmovq %r8, %xmm14
 ; AVX512-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; AVX512-NEXT:    shldq $63, %rax, %r14
-; AVX512-NEXT:    vmovq %r14, %xmm0
-; AVX512-NEXT:    vmovq %rbx, %xmm1
-; AVX512-NEXT:    vmovq %r11, %xmm2
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vmovq %rsi, %xmm1
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512-NEXT:    vmovd %esi, %xmm3
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $1, %eax, %xmm3, %xmm1
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm0
-; AVX512-NEXT:    vpinsrb $4, %r9d, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq %r9, %xmm1
-; AVX512-NEXT:    vmovq %rcx, %xmm2
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq %r15, %xmm2
-; AVX512-NEXT:    vmovq %rdx, %xmm3
-; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm1
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrb $8, %ebp, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq %rbp, %xmm1
-; AVX512-NEXT:    vmovq %r10, %xmm2
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq %rdi, %xmm2
-; AVX512-NEXT:    vmovq %r8, %xmm3
-; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm1
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vpinsrb $12, %r12d, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq %r12, %xmm1
-; AVX512-NEXT:    vmovq %r13, %xmm2
-; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 8-byte Folded Reload
-; AVX512-NEXT:    # xmm2 = mem[0],zero
-; AVX512-NEXT:    vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 8-byte Folded Reload
-; AVX512-NEXT:    # xmm3 = mem[0],zero
-; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm2
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm1
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    shrq %rax
+; AVX512-NEXT:    vmovq %rax, %xmm15
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512-NEXT:    vpbroadcastw %xmm0, %xmm0
+; AVX512-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512-NEXT:    vpbroadcastw %xmm1, %xmm1
+; AVX512-NEXT:    vpbroadcastw %xmm2, %xmm2
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512-NEXT:    vpsllq $48, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
+; AVX512-NEXT:    vpbroadcastw %xmm3, %xmm3
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512-NEXT:    vmovdqu %xmm0, (%rax)
-; AVX512-NEXT:    addq $24, %rsp
 ; AVX512-NEXT:    popq %rbx
 ; AVX512-NEXT:    popq %r12
 ; AVX512-NEXT:    popq %r13

diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll
index 61f67e959ec6d..b099e0c399404 100644
--- a/llvm/test/CodeGen/X86/pr29112.ll
+++ b/llvm/test/CodeGen/X86/pr29112.ll
@@ -8,57 +8,57 @@ declare <4 x float> @foo(<4 x float>, <4 x float>, <4 x float>, <4 x float>, <4
 define <4 x float> @bar(ptr %a1p, ptr %a2p, <4 x float> %a3, <4 x float> %a4, <16 x float>%c1, <16 x float>%c2) {
 ; CHECK-LABEL: bar:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    subq $72, %rsp
-; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    subq $136, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 144
 ; CHECK-NEXT:    vmovaps %xmm1, %xmm13
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4,22,1,17]
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm5 = [3,20,1,17]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm5
+; CHECK-NEXT:    vunpcklps {{.*#+}} ymm0 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5]
+; CHECK-NEXT:    vpermpd {{.*#+}} ymm1 = ymm0[2,1,2,3]
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [4,21,1,17,4,21,5,21]
 ; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm0
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm12 = [4,30,1,22]
+; CHECK-NEXT:    vmovaps %zmm0, %zmm6
+; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm4 = [4,20,1,27]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm4
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm7 = [5,20,1,19,5,20,5,23]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm7
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [4,20,1,19,4,20,5,23]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm0
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm12 = [4,28,1,17]
 ; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm12
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm8 = [4,28,1,29]
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm8 = [5,20,1,17,5,20,5,21]
 ; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm8
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm7 = <5,20,u,u>
-; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm7
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm4 = [4,21,1,7]
-; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm4
-; CHECK-NEXT:    vextractf128 $1, %ymm3, %xmm5
-; CHECK-NEXT:    vextractf128 $1, %ymm2, %xmm9
-; CHECK-NEXT:    vunpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm10 = xmm9[0,1],xmm2[1],xmm9[3]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm1 = xmm10[0,1,2],xmm3[1]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm6 = xmm4[0,1,2],xmm3[1]
-; CHECK-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    vextractf32x4 $2, %zmm3, %xmm4
-; CHECK-NEXT:    vblendps {{.*#+}} xmm4 = xmm10[0,1,2],xmm4[3]
-; CHECK-NEXT:    vpermilps {{.*#+}} xmm11 = xmm2[3,3,3,3]
-; CHECK-NEXT:    vunpcklps {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
-; CHECK-NEXT:    vshufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[1,3]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm3[1]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm11 = xmm7[0,1],xmm2[1],xmm7[3]
-; CHECK-NEXT:    vblendps {{.*#+}} xmm7 = xmm11[0,1,2],xmm3[3]
-; CHECK-NEXT:    vblendps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[3]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm8 = xmm8[0,1,2],xmm3[1]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm11 = xmm11[0,1,2],xmm3[1]
-; CHECK-NEXT:    vaddps %xmm8, %xmm11, %xmm8
-; CHECK-NEXT:    vshufps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3,3]
-; CHECK-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[2]
-; CHECK-NEXT:    vaddps %xmm0, %xmm2, %xmm2
-; CHECK-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT:    vaddps %xmm1, %xmm12, %xmm9
-; CHECK-NEXT:    vaddps %xmm1, %xmm1, %xmm3
+; CHECK-NEXT:    vmovaps {{.*#+}} xmm9 = [4,30,1,22]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm9
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm10 = [4,22,1,17,4,22,5,21]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm10
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm11 = [4,20,3,18,4,20,7,22]
+; CHECK-NEXT:    vpermi2ps %zmm3, %zmm2, %zmm11
+; CHECK-NEXT:    vaddps %xmm10, %xmm11, %xmm2
+; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vaddps %xmm1, %xmm9, %xmm3
+; CHECK-NEXT:    vaddps %xmm12, %xmm8, %xmm9
+; CHECK-NEXT:    vaddps %xmm1, %xmm1, %xmm8
 ; CHECK-NEXT:    vaddps %xmm0, %xmm10, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm8, %xmm0
+; CHECK-NEXT:    vaddps %xmm0, %xmm9, %xmm0
 ; CHECK-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vmovaps %xmm3, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:    vmovaps %xmm9, (%rsp)
+; CHECK-NEXT:    vmovaps %xmm8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    vmovaps %xmm3, (%rsp)
+; CHECK-NEXT:    # kill: def $xmm1 killed $xmm1 killed $ymm1
 ; CHECK-NEXT:    vmovaps %xmm13, %xmm3
+; CHECK-NEXT:    # kill: def $xmm4 killed $xmm4 killed $zmm4
+; CHECK-NEXT:    # kill: def $xmm5 killed $xmm5 killed $zmm5
+; CHECK-NEXT:    # kill: def $xmm6 killed $xmm6 killed $zmm6
+; CHECK-NEXT:    # kill: def $xmm7 killed $xmm7 killed $zmm7
 ; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    callq foo@PLT
-; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; CHECK-NEXT:    vaddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; CHECK-NEXT:    vaddps %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    addq $72, %rsp
+; CHECK-NEXT:    addq $136, %rsp
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> <i32 4, i32 20, i32 1, i32 17>
   %a2 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> <i32 4, i32 21, i32 1, i32 17>

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index de2ff070fdaeb..772060af4f1a4 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -782,310 +782,114 @@ define void @load_i16_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-SLOW-LABEL: load_i16_stride5_vf8:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm3, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %eax, %xmm3, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2],xmm4[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %eax, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %eax, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3,4,5],xmm0[6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm3, %eax
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vmovd %edi, %xmm6
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %eax, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm3, %eax
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vmovd %edi, %xmm7
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vmovd %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vmovd %edi, %xmm8
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %eax, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm3, %eax
-; AVX512F-SLOW-NEXT:    vmovd %eax, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5],ymm2[6],ymm0[7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1,2,3],xmm1[4,5],xmm3[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,10,11,4,5,14,15,8,9,2,3,12,13,u,u]
+; AVX512F-SLOW-NEXT:    vpbroadcastw 70(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm3[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7,8],ymm0[9],ymm2[10],ymm0[11],ymm2[12,13],ymm0[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6],xmm5[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
+; AVX512F-SLOW-NEXT:    vpsllq $48, %xmm3, %xmm5
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11],ymm0[12],ymm2[13,14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10],ymm0[11],ymm2[12],ymm0[13],ymm2[14,15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[0,1,0,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5],ymm0[6],ymm2[7,8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%r8)
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%r9)
+; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride5_vf8:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,1,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2],xmm4[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $3, %eax, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $5, %eax, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3,4,5],xmm0[6],xmm4[7]
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrw $2, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $3, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $4, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $5, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $6, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $7, %eax, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[4,5,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrw $2, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $3, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $4, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $5, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $6, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $7, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vmovd %edi, %xmm8
-; AVX512F-FAST-NEXT:    vpinsrw $1, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $2, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $3, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $4, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $5, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $6, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $7, %eax, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm3, %eax
-; AVX512F-FAST-NEXT:    vmovd %eax, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4,5],xmm3[6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,10,11,4,5,14,15,8,9,2,3,12,13,u,u]
+; AVX512F-FAST-NEXT:    vpbroadcastw 70(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6],xmm5[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,3,12,13,6,7,0,1,10,11,4,5,14,15,u,u]
+; AVX512F-FAST-NEXT:    vpsllq $48, %xmm3, %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm3[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm4, (%rdx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, (%rcx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%r8)
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%r9)
+; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i16_stride5_vf8:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,9,14,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm1
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <3,8,13,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,7,12,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,6,11,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,5,10,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %eax, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,5],xmm1[6],xmm3[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %eax, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %eax, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %eax, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %eax, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovd %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %eax, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm2, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %eax, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %eax, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm3[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%r9)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i16_stride5_vf8:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,9,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm1
-; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <3,8,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm4
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,7,12,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <1,6,11,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,5,10,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm7
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm7, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm2[0,1,2,3,4,5],xmm0[6],xmm2[7]
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %eax, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %eax, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %eax, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %eax, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %eax, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %eax, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %eax, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm7, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm1, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %eax, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm7, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %eax, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm2, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm5, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%r9)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i16_stride5_vf8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,5,10,15,20,25,30,35]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,6,11,16,21,26,31,36]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,7,12,17,22,27,32,37]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,8,13,18,23,28,33,38]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,9,14,19,24,29,34,39]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <40 x i16>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35>
   %strided.vec1 = shufflevector <40 x i16> %wide.vec, <40 x i16> poison, <8 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36>

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index b7465fb61f7ce..f51e3bb074006 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -944,399 +944,151 @@ define void @load_i16_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-LABEL: load_i16_stride6_vf8:
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %r10d, %xmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm1, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm5, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %r10d, %xmm4, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm7
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm1[3],xmm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm8
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5],xmm5[6],xmm8[7]
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vmovd %edi, %xmm9
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm10
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm0[4],xmm10[5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm2
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vpslld $16, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm7[0,2,0,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpbroadcastw 74(%rdi), %xmm6
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2],xmm3[3],xmm7[4,5],xmm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm6[0,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm4[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, (%rdx)
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride6_vf8:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm3, %r10d
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[0,1,12,13,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrw $2, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm1, %r10d
-; AVX512F-FAST-NEXT:    vpinsrw $3, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %r10d
-; AVX512F-FAST-NEXT:    vpinsrw $4, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm0, %r10d
-; AVX512F-FAST-NEXT:    vpinsrw $5, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm5, %r10d
-; AVX512F-FAST-NEXT:    vpinsrw $6, %r10d, %xmm4, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[2,3,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm1[3],xmm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vmovd %r10d, %xmm8
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5],xmm5[6],xmm8[7]
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vmovd %edi, %xmm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vmovd %r10d, %xmm10
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm0[4],xmm10[5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vmovd %r10d, %xmm2
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rdx)
+; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
+; AVX512F-FAST-NEXT:    vpslld $16, %xmm0, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3]
+; AVX512F-FAST-NEXT:    vpbroadcastw 74(%rdi), %xmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[2,1,2,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[2,2,2,2,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, (%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i16_stride6_vf8:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <3,9,15,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm1
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,8,14,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <1,7,13,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,6,12,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm1, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %r10d, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %r10d, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %r10d, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %r10d, %xmm2, %xmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm1[3],xmm6[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5],xmm3[6],xmm5[7]
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm8, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm7, %r10d
-; AVX512BW-SLOW-NEXT:    vmovd %r10d, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm0[4],xmm10[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm8, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm7, %r10d
-; AVX512BW-SLOW-NEXT:    vmovd %r10d, %xmm7
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm7, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i16_stride6_vf8:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <5,11,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,10,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,9,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,8,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm7
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,7,13,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,6,12,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm2, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %r10d, %xmm3, %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovd %xmm3, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm3, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm5, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %r10d, %xmm4, %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm4, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %r10d, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm2[3],xmm8[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm10, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5],xmm5[6],xmm7[7]
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm10, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm6, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i16_stride6_vf8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,6,12,18,24,30,36,42]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,7,13,19,25,31,37,43]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,8,14,20,26,32,38,44]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,9,15,21,27,33,39,45]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,10,16,22,28,34,40,46]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,11,17,23,29,35,41,47]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512BW-NEXT:    vmovdqa %xmm7, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <48 x i16>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
   %strided.vec1 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 7b78444f1d0f9..c9d085de3a5b6 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -1266,476 +1266,192 @@ define void @load_i16_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %r11d, %xmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm1, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm4, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %r11d, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm6, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %r11d, %xmm5, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm8
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vmovd %edi, %xmm9
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm4[5],xmm9[6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm10
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm11
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm1[2],xmm11[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5],xmm6[6],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm12
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $3, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $3, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vmovd %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrw $6, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm2
-; AVX512F-SLOW-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, (%r10)
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,7,6]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,3,0,1,14,15,12,13,10,11,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2,3,4],xmm6[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm2[4],xmm7[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3,4,5],xmm9[6],xmm8[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[4,5,2,3,0,1,14,15,12,13,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3,4],xmm7[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2],xmm10[3],xmm11[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,2,1,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5],xmm2[6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,2,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0],xmm11[1,2,3,4,5,6],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,0,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,4,7,6]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,6,4,6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%r10)
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride7_vf8:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm3, %r11d
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[0,1,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrw $2, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm1, %r11d
-; AVX512F-FAST-NEXT:    vpinsrw $3, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm4, %r11d
-; AVX512F-FAST-NEXT:    vpinsrw $5, %r11d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm6, %r11d
-; AVX512F-FAST-NEXT:    vpinsrw $6, %r11d, %xmm5, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vmovd %r11d, %xmm8
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vmovd %edi, %xmm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm4[5],xmm9[6,7]
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vmovd %r11d, %xmm10
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vmovd %r11d, %xmm11
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm1[2],xmm11[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5],xmm6[6],xmm11[7]
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vmovd %r11d, %xmm12
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $3, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $3, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512F-FAST-NEXT:    vpextrw $6, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vmovd %r11d, %xmm2
-; AVX512F-FAST-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm5[7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm12, (%r10)
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm1[1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,12,13,10,11,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,14,15,12,13,10,11,8,9,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm7 = xmm0[0,1],xmm1[2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm2[3],xmm7[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,u,u,8,9,6,7,4,5]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,3,0,1,14,15,12,13,10,11,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2,3,4],xmm6[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm2[4],xmm7[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,10,11,8,9,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3,4,5],xmm9[6],xmm8[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[4,5,2,3,0,1,14,15,12,13,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3,4],xmm7[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0,1],xmm0[2],xmm1[3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,8,9,6,7,0,1]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2],xmm10[3],xmm11[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[6,7,4,5,2,3,0,1,14,15,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5],xmm2[6],xmm9[7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm10, %xmm12
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm11
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm1[0,1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0],xmm11[1,2,3,4,5,6],xmm2[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm3, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rcx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%r10)
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i16_stride7_vf8:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <1,8,15,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm1
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,7,14,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm1, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %r11d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm0[4],xmm3[5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm6, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %r11d, %xmm4, %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vmovd %edi, %xmm9
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm5[1],xmm9[2,3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm3[5],xmm9[6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm11
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm1[2],xmm11[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5],xmm6[6],xmm11[7]
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm12
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovd %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm2
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm4[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm12, (%r10)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i16_stride7_vf8:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,13,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <5,12,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <4,11,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,10,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,9,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,8,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,7,14,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm2, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %r11d, %xmm3, %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm4, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %r11d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm6, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %r11d, %xmm5, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm5, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %r11d, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm12
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm12, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm4[5],xmm10[6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vmovd %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm2[2],xmm8[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5],xmm6[6],xmm8[7]
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm5[7]
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm11, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm1, (%r10)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i16_stride7_vf8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,7,14,21,28,35,42,49]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,8,15,22,29,36,43,50]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,9,16,23,30,37,44,51]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,10,17,24,31,38,45,52]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,11,18,25,32,39,46,53]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,12,19,26,33,40,47,54]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,13,20,27,34,41,48,55]
+; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm8
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512BW-NEXT:    vmovdqa %xmm7, (%r10)
+; AVX512BW-NEXT:    vmovdqa %xmm8, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <56 x i16>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <56 x i16> %wide.vec, <56 x i16> poison, <8 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49>
   %strided.vec1 = shufflevector <56 x i16> %wide.vec, <56 x i16> poison, <8 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50>

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
index 6c8d41e2119e4..dcd475a2d2c38 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-8.ll
@@ -533,416 +533,164 @@ define void @load_i16_stride8_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm2, (%rax)
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-LABEL: load_i16_stride8_vf8:
-; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    pushq %rbx
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-NEXT:    vmovd %xmm1, %ebx
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-NEXT:    vpinsrw $2, %ebx, %xmm4, %xmm4
-; AVX512F-NEXT:    vmovd %xmm0, %ebx
-; AVX512F-NEXT:    vpinsrw $3, %ebx, %xmm4, %xmm5
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-NEXT:    vmovd %xmm4, %ebx
-; AVX512F-NEXT:    vpinsrw $4, %ebx, %xmm5, %xmm6
-; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-NEXT:    vmovd %xmm5, %ebx
-; AVX512F-NEXT:    vpinsrw $5, %ebx, %xmm6, %xmm7
-; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-NEXT:    vmovd %xmm6, %ebx
-; AVX512F-NEXT:    vpinsrw $6, %ebx, %xmm7, %xmm8
-; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512F-NEXT:    vmovd %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512F-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512F-NEXT:    vmovd %edi, %xmm9
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512F-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $1, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512F-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512F-NEXT:    vpextrw $2, %xmm2, %ebx
-; AVX512F-NEXT:    vmovd %ebx, %xmm10
-; AVX512F-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm1[2],xmm10[3,4,5,6,7]
-; AVX512F-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpextrw $2, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpextrw $2, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512F-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512F-NEXT:    vpextrw $3, %xmm2, %ebx
-; AVX512F-NEXT:    vmovd %ebx, %xmm11
-; AVX512F-NEXT:    vpinsrw $1, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpextrw $3, %xmm1, %edi
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm0[3],xmm11[4,5,6,7]
-; AVX512F-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpextrw $3, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512F-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512F-NEXT:    vpextrw $4, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm12, %xmm12
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm4[4],xmm12[5,6,7]
-; AVX512F-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512F-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512F-NEXT:    vpextrw $4, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512F-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512F-NEXT:    vpextrw $5, %xmm2, %ebx
-; AVX512F-NEXT:    vmovd %ebx, %xmm13
-; AVX512F-NEXT:    vpinsrw $1, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpextrw $5, %xmm1, %edi
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm5[5],xmm13[6,7]
-; AVX512F-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpextrw $5, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm13, %xmm13
-; AVX512F-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512F-NEXT:    vpextrw $6, %xmm2, %ebx
-; AVX512F-NEXT:    vmovd %ebx, %xmm14
-; AVX512F-NEXT:    vpinsrw $1, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpextrw $6, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5],xmm6[6],xmm14[7]
-; AVX512F-NEXT:    vpextrw $6, %xmm7, %edi
-; AVX512F-NEXT:    vpinsrw $7, %edi, %xmm14, %xmm14
-; AVX512F-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512F-NEXT:    vpextrw $7, %xmm2, %ebx
-; AVX512F-NEXT:    vmovd %ebx, %xmm2
-; AVX512F-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512F-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512F-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm1
-; AVX512F-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512F-NEXT:    vpinsrw $3, %edi, %xmm1, %xmm0
-; AVX512F-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512F-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512F-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512F-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512F-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512F-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm7[7]
-; AVX512F-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512F-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512F-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512F-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512F-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512F-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512F-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512F-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-NEXT:    popq %rbx
-; AVX512F-NEXT:    retq
+; AVX512F-SLOW-LABEL: load_i16_stride8_vf8:
+; AVX512F-SLOW:       # %bb.0:
+; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,0,4>
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm5[2,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm6[0,1,2],xmm8[3]
+; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vpermt2d %xmm5, %xmm3, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm15 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm15[0,1],xmm6[2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm15 = xmm15[0],xmm13[1],xmm15[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm15[0,1],xmm7[2,3]
+; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm15 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm15[0,1],xmm8[2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <3,7,u,u>
+; AVX512F-SLOW-NEXT:    vpermt2d %xmm13, %xmm15, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm14[0,1],xmm9[2,3]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpermi2d %xmm0, %xmm1, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[1,1,1,1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm10[0],xmm2[1],xmm10[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1],xmm5[2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm1[0,1,2],xmm10[3]
+; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
+; AVX512F-SLOW-NEXT:    vpermt2d %xmm2, %xmm15, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm4[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, (%r9)
+; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, (%rax)
+; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%rax)
+; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-SLOW-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i16_stride8_vf8:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    pushq %rbx
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %ebx
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %ebx, %xmm4, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %ebx, %xmm5, %xmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vmovd %xmm5, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %ebx, %xmm6, %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %ebx, %xmm7, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vmovd %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vmovd %edi, %xmm9
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm3[1],xmm9[2,3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $1, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm1[2],xmm10[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $2, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm11
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm0[3],xmm11[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $3, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm4[4],xmm12[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $4, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm13
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm5[5],xmm13[6,7]
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrw $5, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm14
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5],xmm6[6],xmm14[7]
-; AVX512BW-SLOW-NEXT:    vpextrw $6, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $7, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm2
-; AVX512BW-SLOW-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $2, %edi, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $3, %edi, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm7[7]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    popq %rbx
-; AVX512BW-SLOW-NEXT:    retq
+; AVX512F-FAST-LABEL: load_i16_stride8_vf8:
+; AVX512F-FAST:       # %bb.0:
+; AVX512F-FAST-NEXT:    vmovdqa 112(%rdi), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,0,4>
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, %xmm0
+; AVX512F-FAST-NEXT:    vpermt2d %xmm4, %xmm2, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm11
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <1,5,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa %xmm13, %xmm15
+; AVX512F-FAST-NEXT:    vpermt2d %xmm12, %xmm14, %xmm15
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm15[0,1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, %xmm15
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,2,6>
+; AVX512F-FAST-NEXT:    vpermt2d %xmm4, %xmm0, %xmm15
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm15[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <3,7,u,u>
+; AVX512F-FAST-NEXT:    vpermt2d %xmm12, %xmm15, %xmm13
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm7 = xmm13[0,1],xmm7[2,3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
+; AVX512F-FAST-NEXT:    vpermi2d %xmm1, %xmm3, %xmm2
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm8[0,1],xmm2[2,3]
+; AVX512F-FAST-NEXT:    vpermi2d %xmm5, %xmm6, %xmm14
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm8 = xmm14[0,1],xmm8[2,3]
+; AVX512F-FAST-NEXT:    vpermi2d %xmm1, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm9[0,1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vpermt2d %xmm5, %xmm15, %xmm6
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, (%rdx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, (%r9)
+; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rax)
+; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, (%rax)
+; AVX512F-FAST-NEXT:    retq
 ;
-; AVX512BW-FAST-LABEL: load_i16_stride8_vf8:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    pushq %rbx
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <7,15,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm2
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,14,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <5,13,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,11,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm12
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,10,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,9,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vmovd %xmm3, %ebx
-; AVX512BW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %ebx, %xmm4, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %ebx, %xmm5, %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %ebx, %xmm6, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vmovd %xmm6, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %ebx, %xmm7, %xmm8
-; AVX512BW-FAST-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512BW-FAST-NEXT:    vmovd %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrw $1, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm3[2],xmm11[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $2, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm2[3],xmm12[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrw $3, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm4[4],xmm13[5,6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrw $4, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm5[5],xmm9[6,7]
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $5, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm6[6],xmm1[7]
-; AVX512BW-FAST-NEXT:    vpextrw $6, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $7, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm3, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $2, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $3, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrw $7, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm7[7]
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm10, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm11, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm12, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm13, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%r11)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm1, (%r10)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    popq %rbx
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i16_stride8_vf8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,8,16,24,32,40,48,56]
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vmovdqa64 64(%rdi), %zmm2
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,9,17,25,33,41,49,57]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,10,18,26,34,42,50,58]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm4
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,11,19,27,35,43,51,59]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,12,20,28,36,44,52,60]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm6
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,13,21,29,37,45,53,61]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm7
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14,22,30,38,46,54,62]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = [7,15,23,31,39,47,55,63]
+; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm1, %zmm9
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512BW-NEXT:    vmovdqa %xmm7, (%r11)
+; AVX512BW-NEXT:    vmovdqa %xmm8, (%r10)
+; AVX512BW-NEXT:    vmovdqa %xmm9, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <64 x i16>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
   %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <8 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57>
@@ -9283,12 +9031,15 @@ define void @load_i16_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512: {{.*}}
+; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
index 536a9cc45f859..5da2a75454b5c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
@@ -284,157 +284,28 @@ define void @load_i32_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i32_stride5_vf4:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,5,10,15]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vpermt2d 32(%rdi), %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrd $3, %xmm3, %eax
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vmovd %xmm6, %eax
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm0[0,1,2],xmm2[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrd $1, %xmm6, %eax
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %eax, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrd $1, %xmm4, %eax
-; AVX512F-SLOW-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrd $2, %xmm6, %eax
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%r9)
-; AVX512F-SLOW-NEXT:    vzeroupper
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i32_stride5_vf4:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,5,10,15]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vpermt2d 32(%rdi), %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [25769803777,25769803777]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vpextrd $3, %xmm4, %eax
-; AVX512F-FAST-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vmovd %xmm6, %eax
-; AVX512F-FAST-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [30064771074,30064771074]
-; AVX512F-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %eax
-; AVX512F-FAST-NEXT:    vpinsrd $2, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrd $1, %xmm6, %eax
-; AVX512F-FAST-NEXT:    vpinsrd $3, %eax, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrd $1, %xmm5, %eax
-; AVX512F-FAST-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrd $2, %xmm6, %eax
-; AVX512F-FAST-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[3]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512F-FAST-NEXT:    vzeroupper
-; AVX512F-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i32_stride5_vf4:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,5,10,15]
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    vpermt2d 32(%rdi), %ymm0, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrd $3, %xmm3, %eax
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,3]
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vmovd %xmm6, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %eax
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm0[0,1,2],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %eax, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrd $1, %xmm6, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %eax, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrd $1, %xmm4, %eax
-; AVX512BW-SLOW-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrd $2, %xmm6, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],xmm6[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, (%r9)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i32_stride5_vf4:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,5,10,15]
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    vpermt2d 32(%rdi), %ymm0, %ymm1
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm0 = [25769803777,25769803777]
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrd $3, %xmm4, %eax
-; AVX512BW-FAST-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vmovd %xmm6, %eax
-; AVX512BW-FAST-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [30064771074,30064771074]
-; AVX512BW-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm7
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %eax
-; AVX512BW-FAST-NEXT:    vpinsrd $2, %eax, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrd $1, %xmm6, %eax
-; AVX512BW-FAST-NEXT:    vpinsrd $3, %eax, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrd $1, %xmm5, %eax
-; AVX512BW-FAST-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10,11]
-; AVX512BW-FAST-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrd $2, %xmm6, %eax
-; AVX512BW-FAST-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm6[3]
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512-LABEL: load_i32_stride5_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,5,10,15]
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512-NEXT:    vpermt2d 32(%rdi), %ymm2, %ymm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,6,11,16]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,7,12,17]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,8,13,18]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,9,14,19]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vmovdqa %xmm3, (%rsi)
+; AVX512-NEXT:    vmovdqa %xmm2, (%rdx)
+; AVX512-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %wide.vec = load <20 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15>
   %strided.vec1 = shufflevector <20 x i32> %wide.vec, <20 x i32> poison, <4 x i32> <i32 1, i32 6, i32 11, i32 16>

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 0b7c89211ee94..8c0cc12f8429f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -375,301 +375,31 @@ define void @load_i32_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i32_stride6_vf4:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vpextrd $2, %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrd $1, %r10d, %xmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vmovd %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpextrd $2, %xmm5, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrd $1, %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,3,2,3]
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %r10d, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm1[0,1],xmm2[2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,0,2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm3[2],xmm7[3]
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2,3]
-; AVX512F-SLOW-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrd $2, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm3
-; AVX512F-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512F-SLOW-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrd $1, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512F-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: load_i32_stride6_vf4:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [25769803776,25769803776]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [17179869186,17179869186]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836483,21474836483]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm4, %r10d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %r10d, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm8, %r10d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %r10d, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [30064771073,30064771073]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm9, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm4, %r10d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %r10d, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1],xmm4[2],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm10, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $3, %xmm4, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm10, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm10, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm3, %xmm9, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-FAST-LABEL: load_i32_stride6_vf4:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [25769803776,25769803776]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm5
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [17179869186,17179869186]
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm2, %xmm0, %xmm6
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836483,21474836483]
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm2, %xmm0, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm1, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovd %xmm3, %r10d
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %r10d, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm8, %r10d
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %r10d, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [30064771073,30064771073]
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm1, %xmm9, %xmm5
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm3, %r10d
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %r10d, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1],xmm3[2],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512DQ-FAST-NEXT:    vmovd %xmm10, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm3
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm10, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm10, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm2, %xmm9, %xmm1
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm10[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i32_stride6_vf4:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrd $2, %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrd $1, %r10d, %xmm2, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %r10d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrd $2, %xmm5, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %r10d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrd $1, %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm2[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %r10d, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm1[0,1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,0,2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm3[2],xmm7[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512BW-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrd $2, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $1, %edi, %xmm0, %xmm3
-; AVX512BW-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrd $1, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm4, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: load_i32_stride6_vf4:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [25769803776,25769803776]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [17179869186,17179869186]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836483,21474836483]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm2, %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm1, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm4, %r10d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %r10d, %xmm2, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm8, %r10d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %r10d, %xmm2, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [30064771073,30064771073]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm9, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm4, %r10d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %r10d, %xmm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1],xmm4[2],xmm6[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm10, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $3, %xmm4, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm10, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm3, %xmm0, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm10, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm1, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm3, %xmm9, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i32_stride6_vf4:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm3
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm4 = [25769803776,25769803776]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm0, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm6 = [17179869186,17179869186]
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm2, %xmm0, %xmm6
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836483,21474836483]
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm2, %xmm0, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm1, %xmm4, %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm3, %r10d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %r10d, %xmm0, %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm8, %r10d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %r10d, %xmm0, %xmm0
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [30064771073,30064771073]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm1, %xmm9, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm3, %r10d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %r10d, %xmm5, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1],xmm3[2],xmm6[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm10, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512DQBW-FAST-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm3
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm10, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm2, %xmm1, %xmm4
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm4, %xmm4
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm10, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm2, %xmm9, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm10[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm0, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm4, (%r9)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512-LABEL: load_i32_stride6_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,6,12,18]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,7,13,19]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,8,14,20]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,9,15,21]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,10,16,22]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,11,17,23]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512-NEXT:    vmovdqa %xmm7, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %wide.vec = load <24 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
   %strided.vec1 = shufflevector <24 x i32> %wide.vec, <24 x i32> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
@@ -10732,9 +10462,13 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX: {{.*}}
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index 7e7a79ef19f13..27c77459f4696 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -560,465 +560,35 @@ define void @load_i32_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: load_i32_stride7_vf4:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm3, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm4, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm2, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm4[1],xmm5[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $2, %xmm2, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm5[1],xmm7[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm8, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm9, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm5, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm3, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm8[2],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm9, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm8, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,0,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $2, %xmm9, %r11d
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 24(%rdi), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm3, (%r10)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: load_i32_stride7_vf4:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [30064771072,30064771072]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [25769803779,25769803779]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm4, %xmm3, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [17179869185,17179869185]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm4, %xmm9, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $3, %xmm1, %r11d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm8, %r11d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [21474836482,21474836482]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm11, %r11d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm11, %r11d
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm12, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm4, %xmm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm11[2],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm12, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm1, %xmm0, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $3, %xmm11, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm12, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm1, %xmm10, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-SLOW-LABEL: load_i32_stride7_vf4:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm2, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm3, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm1[0,1],xmm0[2],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm1, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm4[0],xmm3[1],xmm4[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,3]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpextrd $2, %xmm1, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm8, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm9
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm9, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm4, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm2, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm8[2],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm9, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm4, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm8, %r11d
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,3]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpextrd $2, %xmm9, %r11d
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 24(%rdi), %xmm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm4, (%r9)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, (%r10)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQ-SLOW-NEXT:    retq
-;
-; AVX512DQ-FAST-LABEL: load_i32_stride7_vf4:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [30064771072,30064771072]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [25769803779,25769803779]
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm4, %xmm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm3, %xmm1, %xmm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm0[2],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %r11d, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [17179869185,17179869185]
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm4, %xmm9, %xmm5
-; AVX512DQ-FAST-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %r11d, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm8, %r11d
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [21474836482,21474836482]
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512DQ-FAST-NEXT:    vmovd %xmm11, %r11d
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm11, %r11d
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vmovd %xmm12, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm4, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm11[2],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm12, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm0, %xmm3, %xmm9
-; AVX512DQ-FAST-NEXT:    vpextrd $3, %xmm11, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm4
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm12, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm0, %xmm10, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm3, %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm7, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: load_i32_stride7_vf4:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm3, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm4, %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm0[2],xmm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm2, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm1, %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm4[1],xmm5[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $2, %xmm2, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm5[1],xmm7[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm8, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm2[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,2,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm4, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm9, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm4, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm5, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm3, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm8[2],xmm5[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm9, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm8, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,0,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm3, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $2, %xmm9, %r11d
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpbroadcastd 24(%rdi), %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm4, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm5, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm3, (%r10)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: load_i32_stride7_vf4:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [30064771072,30064771072]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [25769803779,25769803779]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm4, %xmm3, %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm0, %xmm2, %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [17179869185,17179869185]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm4, %xmm9, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $3, %xmm1, %r11d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm8, %r11d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [21474836482,21474836482]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm11, %r11d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm11, %r11d
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm12, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm4, %xmm0, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm11[2],xmm2[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm12, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm1, %xmm0, %xmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $3, %xmm11, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm12, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm1, %xmm10, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm3, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm7, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: load_i32_stride7_vf4:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm2, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm3, %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm1[0,1],xmm0[2],xmm1[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm1, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm4[0],xmm3[1],xmm4[2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vpextrd $2, %xmm1, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm8, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0,1,2],xmm1[3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,2,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm3, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm9
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm9, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm3, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm4, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $1, %r11d, %xmm2, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm8[2],xmm4[3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm9, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm4, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm8, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %r11d, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpextrd $2, %xmm9, %r11d
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %r11d, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpbroadcastd 24(%rdi), %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3]
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm9[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm4, (%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm2, (%r10)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i32_stride7_vf4:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [30064771072,30064771072]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm2, %xmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [25769803779,25769803779]
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm4, %xmm2, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm3, %xmm1, %xmm2
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm0[2],xmm2[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm8, %r11d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %r11d, %xmm2, %xmm2
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm9 = [17179869185,17179869185]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm4, %xmm9, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpextrd $3, %xmm0, %r11d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %r11d, %xmm5, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm8, %r11d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %r11d, %xmm5, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [21474836482,21474836482]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm11, %r11d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %r11d, %xmm6, %xmm6
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3]
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm11, %r11d
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %r11d, %xmm7, %xmm7
-; AVX512DQBW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm12, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm4, %xmm3, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm11[2],xmm1[3]
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm12, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm1, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm0, %xmm3, %xmm9
-; AVX512DQBW-FAST-NEXT:    vpextrd $3, %xmm11, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm4
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm12, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm0, %xmm10, %xmm3
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm8, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm3, %xmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm2, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm5, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm6, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm7, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm1, (%r9)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512-LABEL: load_i32_stride7_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,7,14,21]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,8,15,22]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,9,16,23]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,10,17,24]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,11,18,25]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,12,19,26]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,13,20,27]
+; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm8
+; AVX512-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512-NEXT:    vmovdqa %xmm7, (%r10)
+; AVX512-NEXT:    vmovdqa %xmm8, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %wide.vec = load <28 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 0, i32 7, i32 14, i32 21>
   %strided.vec1 = shufflevector <28 x i32> %wide.vec, <28 x i32> poison, <4 x i32> <i32 1, i32 8, i32 15, i32 22>
@@ -13126,6 +12696,14 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX: {{.*}}
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
index 3d013fd0be792..17f94a2d6979f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-8.ll
@@ -435,513 +435,39 @@ define void @load_i32_stride8_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-ONLY-SLOW-LABEL: load_i32_stride8_vf4:
-; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    pushq %rbx
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm5[1],xmm7[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm2, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm2, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm3, %ebx
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[1,1,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0],xmm1[1],xmm9[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm3, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm9, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm1[0,1],xmm3[2],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm0, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm9, (%r11)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    popq %rbx
-; AVX512F-ONLY-SLOW-NEXT:    retq
-;
-; AVX512F-ONLY-FAST-LABEL: load_i32_stride8_vf4:
-; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    pushq %rbx
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm3, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm7, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm8 = [21474836481,21474836481]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm5, %xmm8, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm3, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm7, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0,1],xmm3[2],xmm9[3]
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm7, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [30064771075,30064771075]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm5, %xmm10, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $3, %xmm3, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm4, %ebx
-; AVX512F-ONLY-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovd %xmm7, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %xmm6, %xmm0, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm4, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $1, %xmm7, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],xmm4[2],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $2, %xmm7, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm11, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %xmm6, %xmm10, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpextrd $3, %xmm4, %edi
-; AVX512F-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm8, (%r11)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm11, (%r10)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    popq %rbx
-; AVX512F-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQ-SLOW-LABEL: load_i32_stride8_vf4:
-; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    pushq %rbx
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm1, %ebx
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2,3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm1, %ebx
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm7, %xmm7
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm7, %xmm7
-; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm2[0,1],xmm1[2],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm4, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm1, %ebx
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512DQ-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0],xmm0[1],xmm9[2,3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm2, %edi
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %edi, %xmm9, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm2[2],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512DQ-SLOW-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpextrd $3, %xmm2, %edi
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm4, (%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, (%r9)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm9, (%r11)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm3, (%r10)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQ-SLOW-NEXT:    popq %rbx
-; AVX512DQ-SLOW-NEXT:    retq
-;
-; AVX512DQ-FAST-LABEL: load_i32_stride8_vf4:
-; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    pushq %rbx
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovd %xmm0, %ebx
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovd %xmm6, %ebx
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836481,21474836481]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, %xmm8
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm3, %xmm7, %xmm8
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm0, %ebx
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %ebx, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %ebx, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0,1],xmm0[2],xmm9[3]
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %ebx, %xmm9, %xmm9
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [30064771075,30064771075]
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm3, %xmm10, %xmm1
-; AVX512DQ-FAST-NEXT:    vpextrd $3, %xmm0, %ebx
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovd %xmm1, %ebx
-; AVX512DQ-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %ebx, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 112(%rdi), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermi2d %xmm4, %xmm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm1, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpextrd $1, %xmm6, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],xmm1[2],xmm11[3]
-; AVX512DQ-FAST-NEXT:    vpextrd $2, %xmm6, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $3, %edi, %xmm11, %xmm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm2
-; AVX512DQ-FAST-NEXT:    vpextrd $3, %xmm1, %edi
-; AVX512DQ-FAST-NEXT:    vpinsrd $2, %edi, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm7, (%r11)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm11, (%r10)
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512DQ-FAST-NEXT:    popq %rbx
-; AVX512DQ-FAST-NEXT:    retq
-;
-; AVX512BW-ONLY-SLOW-LABEL: load_i32_stride8_vf4:
-; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    pushq %rbx
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm5[1],xmm7[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm2, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm7, %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm7, %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm3[0,1],xmm2[2],xmm3[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm2, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],xmm6[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm3, %ebx
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[1,1,1,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0],xmm1[1],xmm9[2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm3, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm9, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm1[0,1],xmm3[2],xmm1[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $3, %edi, %xmm4, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpextrd $3, %xmm3, %edi
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpinsrd $2, %edi, %xmm1, %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm0, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm5, (%rcx)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm2, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm6, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm9, (%r11)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm4, (%r10)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512BW-ONLY-SLOW-NEXT:    popq %rbx
-; AVX512BW-ONLY-SLOW-NEXT:    retq
-;
-; AVX512BW-ONLY-FAST-LABEL: load_i32_stride8_vf4:
-; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    pushq %rbx
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm3, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 16(%rdi), %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 48(%rdi), %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm7, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm1, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm8 = [21474836481,21474836481]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm4, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm5, %xmm8, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm3, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm2, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm7, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm2, %xmm2
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0,1],xmm3[2],xmm9[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm7, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %ebx, %xmm9, %xmm9
-; AVX512BW-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [30064771075,30064771075]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm5, %xmm10, %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $3, %xmm3, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm4, %xmm3
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm7[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm4, %ebx
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512BW-ONLY-FAST-NEXT:    vmovd %xmm7, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2d %xmm6, %xmm0, %xmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm4, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm8, %xmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $1, %xmm7, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm8, %xmm8
-; AVX512BW-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],xmm4[2],xmm11[3]
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $2, %xmm7, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $3, %edi, %xmm11, %xmm11
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %xmm6, %xmm10, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpextrd $3, %xmm4, %edi
-; AVX512BW-ONLY-FAST-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm1, (%rsi)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%rdx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm3, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm5, (%r9)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm8, (%r11)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm11, (%r10)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-ONLY-FAST-NEXT:    popq %rbx
-; AVX512BW-ONLY-FAST-NEXT:    retq
-;
-; AVX512DQBW-SLOW-LABEL: load_i32_stride8_vf4:
-; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    pushq %rbx
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm1, %ebx
-; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[1,1,1,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm7[0],xmm4[1],xmm7[2,3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm1, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm7, %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm7, %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm2[0,1],xmm1[2],xmm2[3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %ebx, %xmm4, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm1, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512DQBW-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %ebx, %xmm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm8
-; AVX512DQBW-SLOW-NEXT:    vmovd %xmm8, %edi
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm6, %xmm6
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[1,1,1,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0],xmm0[1],xmm9[2,3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm2, %edi
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm9, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vpextrd $1, %xmm8, %edi
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm9, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm2[2],xmm0[3]
-; AVX512DQBW-SLOW-NEXT:    vpextrd $2, %xmm8, %edi
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpextrd $3, %xmm2, %edi
-; AVX512DQBW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpinsrd $2, %edi, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm4, (%rcx)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm1, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm6, (%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm9, (%r11)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm3, (%r10)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512DQBW-SLOW-NEXT:    popq %rbx
-; AVX512DQBW-SLOW-NEXT:    retq
-;
-; AVX512DQBW-FAST-LABEL: load_i32_stride8_vf4:
-; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    pushq %rbx
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512DQBW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm0, %ebx
-; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512DQBW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %ebx, %xmm5, %xmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm6, %ebx
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %ebx, %xmm5, %xmm5
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [21474836481,21474836481]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm1, %xmm8
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm3, %xmm7, %xmm8
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm0, %ebx
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %ebx, %xmm8, %xmm8
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm6, %ebx
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %ebx, %xmm8, %xmm8
-; AVX512DQBW-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0,1],xmm0[2],xmm9[3]
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm6, %ebx
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %ebx, %xmm9, %xmm9
-; AVX512DQBW-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm10 = [30064771075,30064771075]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm3, %xmm10, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpextrd $3, %xmm0, %ebx
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %ebx, %xmm1, %xmm0
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm1, %ebx
-; AVX512DQBW-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %ebx, %xmm3, %xmm3
-; AVX512DQBW-FAST-NEXT:    vmovdqa 112(%rdi), %xmm6
-; AVX512DQBW-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm3, %xmm3
-; AVX512DQBW-FAST-NEXT:    vpermi2d %xmm4, %xmm2, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm1, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm7, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpextrd $1, %xmm6, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm7, %xmm7
-; AVX512DQBW-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1],xmm1[2],xmm11[3]
-; AVX512DQBW-FAST-NEXT:    vpextrd $2, %xmm6, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $3, %edi, %xmm11, %xmm11
-; AVX512DQBW-FAST-NEXT:    vpermt2d %xmm4, %xmm10, %xmm2
-; AVX512DQBW-FAST-NEXT:    vpextrd $3, %xmm1, %edi
-; AVX512DQBW-FAST-NEXT:    vpinsrd $2, %edi, %xmm2, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm6[3]
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm0, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm3, (%r9)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm7, (%r11)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm11, (%r10)
-; AVX512DQBW-FAST-NEXT:    vmovdqa %xmm1, (%rax)
-; AVX512DQBW-FAST-NEXT:    popq %rbx
-; AVX512DQBW-FAST-NEXT:    retq
+; AVX512-LABEL: load_i32_stride8_vf4:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,8,16,24]
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512-NEXT:    vmovdqa64 64(%rdi), %zmm2
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,9,17,25]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,10,18,26]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm4
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = [3,11,19,27]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm5
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,12,20,28]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm6
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = [5,13,21,29]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,14,22,30]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm8
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm9 = [7,15,23,31]
+; AVX512-NEXT:    vpermi2d %zmm2, %zmm1, %zmm9
+; AVX512-NEXT:    vmovdqa %xmm0, (%rsi)
+; AVX512-NEXT:    vmovdqa %xmm3, (%rdx)
+; AVX512-NEXT:    vmovdqa %xmm4, (%rcx)
+; AVX512-NEXT:    vmovdqa %xmm5, (%r8)
+; AVX512-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512-NEXT:    vmovdqa %xmm7, (%r11)
+; AVX512-NEXT:    vmovdqa %xmm8, (%r10)
+; AVX512-NEXT:    vmovdqa %xmm9, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %wide.vec = load <32 x i32>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
   %strided.vec1 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <4 x i32> <i32 1, i32 9, i32 17, i32 25>
@@ -9800,6 +9326,14 @@ define void @load_i32_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST: {{.*}}
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
+; AVX512BW-ONLY-FAST: {{.*}}
+; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512DQ-FAST: {{.*}}
+; AVX512DQ-SLOW: {{.*}}
+; AVX512DQBW-FAST: {{.*}}
+; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-ONLY-FAST: {{.*}}
+; AVX512F-ONLY-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index 5103dc0901900..ab9da42de3ca3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -804,152 +804,122 @@ define void @load_i8_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512-LABEL: load_i8_stride5_vf16:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512-NEXT:    vpextrb $4, %xmm2, %eax
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[0,5,10,15,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT:    vpinsrb $4, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $9, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $14, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $3, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $8, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $8, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $13, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm3, %xmm3
-; AVX512-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $12, %eax, %xmm3, %xmm5
-; AVX512-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512-NEXT:    vpextrb $1, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm5, %xmm5
-; AVX512-NEXT:    vpextrb $6, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm5, %xmm5
-; AVX512-NEXT:    vpextrb $11, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm5, %xmm5
-; AVX512-NEXT:    vmovd %xmm2, %eax
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm6 = xmm4[1,6,11],zero,xmm4[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT:    vpinsrb $3, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $5, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $4, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $10, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $15, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $4, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $9, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $8, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $14, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $12, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $2, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $7, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $12, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm6, %xmm6
-; AVX512-NEXT:    vpextrb $1, %xmm2, %eax
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[2,7,12],zero,xmm4[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT:    vpinsrb $3, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $6, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $4, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $11, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vmovd %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $5, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $10, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $8, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $15, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $12, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $3, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $8, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $13, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm7, %xmm7
-; AVX512-NEXT:    vpextrb $2, %xmm2, %eax
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm8 = xmm4[3,8,13],zero,xmm4[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT:    vpinsrb $3, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $7, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $4, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $12, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $1, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $6, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $11, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $8, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vmovd %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $12, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $4, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $9, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $14, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm8, %xmm8
-; AVX512-NEXT:    vpextrb $3, %xmm2, %eax
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[4,9,14],zero,xmm4[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT:    vpinsrb $3, %eax, %xmm4, %xmm4
-; AVX512-NEXT:    vpextrb $8, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $4, %eax, %xmm4, %xmm4
-; AVX512-NEXT:    vpextrb $13, %xmm2, %eax
-; AVX512-NEXT:    vpinsrb $5, %eax, %xmm4, %xmm2
-; AVX512-NEXT:    vpextrb $2, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT:    vpextrb $7, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT:    vpextrb $12, %xmm1, %eax
-; AVX512-NEXT:    vpinsrb $8, %eax, %xmm2, %xmm1
-; AVX512-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX512-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm0
-; AVX512-NEXT:    vmovd %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vpextrb $5, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vpextrb $10, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vpextrb $15, %xmm3, %eax
-; AVX512-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512-NEXT:    vmovdqa %xmm5, (%rsi)
-; AVX512-NEXT:    vmovdqa %xmm6, (%rdx)
-; AVX512-NEXT:    vmovdqa %xmm7, (%rcx)
-; AVX512-NEXT:    vmovdqa %xmm8, (%r8)
-; AVX512-NEXT:    vmovdqa %xmm0, (%r9)
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: load_i8_stride5_vf16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm0
+; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm0
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,5,10,15],zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[u,u,u]
+; AVX512F-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,128,128,128]
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm0, %xmm2
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[1,6,11]
+; AVX512F-NEXT:    vpor %xmm6, %xmm2, %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa %ymm2, %ymm7
+; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[0,5,10,15],zero,zero,zero,xmm7[3,8,13,u,u,u]
+; AVX512F-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm7, %xmm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[2,7,12]
+; AVX512F-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
+; AVX512F-NEXT:    vpternlogq $202, %ymm4, %ymm5, %ymm8
+; AVX512F-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[4,9,14,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,7,12],zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[u,u,u]
+; AVX512F-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[3,8,13]
+; AVX512F-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512F-NEXT:    vpternlogq $202, %ymm4, %ymm5, %ymm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
+; AVX512F-NEXT:    vpor %xmm1, %xmm9, %xmm1
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[4,9,14]
+; AVX512F-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm4, %ymm5, %ymm2
+; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512F-NEXT:    vmovdqa %xmm6, (%rsi)
+; AVX512F-NEXT:    vmovdqa %xmm7, (%rdx)
+; AVX512F-NEXT:    vmovdqa %xmm8, (%rcx)
+; AVX512F-NEXT:    vmovdqa %xmm1, (%r8)
+; AVX512F-NEXT:    vmovdqa %xmm0, (%r9)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: load_i8_stride5_vf16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512BW-NEXT:    movw $19026, %ax # imm = 0x4A52
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,xmm3[4,9,14],zero,zero,zero,xmm3[2,7,12,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,5,10,15],zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,128,128,128]
+; AVX512BW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[1,6,11]
+; AVX512BW-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512BW-NEXT:    movw $21140, %ax # imm = 0x5294
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm5 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[1,6,11],zero,zero,zero,zero,xmm5[4,9,14],zero,zero,zero,xmm5[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512BW-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[2,7,12]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512BW-NEXT:    movw $10570, %ax # imm = 0x294A
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm6 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[1,6,11],zero,zero,zero,zero,xmm7[4,9,14,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,7,12],zero,zero,zero,xmm6[0,5,10,15],zero,zero,zero,xmm6[u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512BW-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[3,8,13]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm7 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[3,8,13],zero,zero,zero,xmm7[1,6,11],zero,zero,zero,zero,xmm7[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[2,7,12],zero,zero,zero,xmm7[0,5,10,15,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm4[4,9,14]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm3, %xmm3
+; AVX512BW-NEXT:    vmovdqu16 %ymm0, %ymm1 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,xmm0[3,8,13],zero,zero,zero,xmm0[1,6,11,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,9,14],zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm5, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%r9)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <80 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <80 x i8> %wide.vec, <80 x i8> poison, <16 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75>
   %strided.vec1 = shufflevector <80 x i8> %wide.vec, <80 x i8> poison, <16 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76>
@@ -4241,6 +4211,7 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST: {{.*}}
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
+; AVX512: {{.*}}
 ; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 8cdb27a9f7fa9..7e72435032a37 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -922,749 +922,143 @@ define void @load_i8_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i8_stride6_vf16:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[0,6,12],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm2, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm1, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm1, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %r10d, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm0, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %r10d, %xmm3, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %r10d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm3, %r10d
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %r10d, %xmm4, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm5[1,7,13],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[2,8,14],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[3,9,15],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm5, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm10
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm5, %r10d
-; AVX512F-SLOW-NEXT:    vmovd %r10d, %xmm5
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i8_stride6_vf16:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,6,12],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $4, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm2, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $5, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm1, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $6, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm1, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $7, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $8, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm0, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $9, %r10d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm0, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $10, %r10d, %xmm4, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm4, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $11, %r10d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm4, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $12, %r10d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm4, %r10d
-; AVX512F-FAST-NEXT:    vpinsrb $13, %r10d, %xmm5, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[1,7,13],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[2,8,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[3,9,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[4,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i8_stride6_vf16:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm2, %r10d
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[0,6,12],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm2, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm2, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm1, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm1, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %r10d, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm0, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %r10d, %xmm3, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %r10d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %r10d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm3, %r10d
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %r10d, %xmm4, %xmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm5[1,7,13],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[2,8,14],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[3,9,15],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm5, %r10d
-; AVX512BW-SLOW-NEXT:    vmovd %r10d, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm5, %r10d
-; AVX512BW-SLOW-NEXT:    vmovd %r10d, %xmm5
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm5, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    retq
+; AVX512F-LABEL: load_i8_stride6_vf16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm4, %ymm3, %ymm5
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm5[0,6,12],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[u,u,u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[0,6,12,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm7
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[4,10]
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
+; AVX512F-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $184, %xmm7, %xmm9, %xmm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[1,7,13],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[3,9,15],zero,zero,xmm6[1,7,13,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[5,11]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
+; AVX512F-NEXT:    vpor %xmm6, %xmm7, %xmm6
+; AVX512F-NEXT:    vpternlogq $184, %xmm5, %xmm9, %xmm6
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[0,6,12]
+; AVX512F-NEXT:    vpor %xmm5, %xmm7, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vpternlogq $202, %ymm3, %ymm4, %ymm7
+; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[2,8,14],zero,zero,xmm7[0,6,12],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm11, %xmm12, %xmm11
+; AVX512F-NEXT:    vpternlogq $226, %xmm5, %xmm9, %xmm11
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm2[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm2[1,7,13]
+; AVX512F-NEXT:    vpor %xmm5, %xmm12, %xmm5
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[3,9,15],zero,zero,xmm7[1,7,13],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm7, %xmm10, %xmm7
+; AVX512F-NEXT:    vpternlogq $226, %xmm5, %xmm9, %xmm7
+; AVX512F-NEXT:    vpternlogq $202, %ymm3, %ymm4, %ymm0
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[2,8,14]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
+; AVX512F-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm2[3,9,15]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa %xmm8, (%rsi)
+; AVX512F-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512F-NEXT:    vmovdqa %xmm11, (%rcx)
+; AVX512F-NEXT:    vmovdqa %xmm7, (%r8)
+; AVX512F-NEXT:    vmovdqa %xmm4, (%r9)
+; AVX512F-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
-; AVX512BW-FAST-LABEL: load_i8_stride6_vf16:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm2, %r10d
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,6,12],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm2, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm2, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm1, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm1, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm0, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm0, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %r10d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm0, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %r10d, %xmm4, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm4, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %r10d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm4, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %r10d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm4, %r10d
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %r10d, %xmm5, %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[1,7,13],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[2,8,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[3,9,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[4,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[5,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm2, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm6, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm7, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm10, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride6_vf16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512BW-NEXT:    movw $18724, %r10w # imm = 0x4924
+; AVX512BW-NEXT:    kmovd %r10d, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512BW-NEXT:    vmovdqa 80(%rdi), %xmm5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10]
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero
+; AVX512BW-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512BW-NEXT:    movw $-2048, %di # imm = 0xF800
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vmovdqu8 %xmm6, %xmm3 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero
+; AVX512BW-NEXT:    vpor %xmm4, %xmm6, %xmm4
+; AVX512BW-NEXT:    vmovdqu8 %xmm4, %xmm2 {%k2}
+; AVX512BW-NEXT:    movw $9362, %di # imm = 0x2492
+; AVX512BW-NEXT:    kmovd %edi, %k3
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm4 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12]
+; AVX512BW-NEXT:    vpor %xmm9, %xmm10, %xmm9
+; AVX512BW-NEXT:    vmovdqu8 %xmm9, %xmm8 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-NEXT:    vmovdqu8 %xmm6, %xmm4 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %ymm0, %ymm1 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vmovdqa %xmm3, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm8, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%r9)
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <96 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90>
   %strided.vec1 = shufflevector <96 x i8> %wide.vec, <96 x i8> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91>
@@ -5686,14 +5080,18 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512: {{.*}}
+; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-FAST: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
+; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

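For readers who want to try the stride-7 cases below in isolation, a minimal
standalone reproducer is sketched here. It is illustrative only, not a test from
this patch: the function name and the llc invocation in the comment are
assumptions, and only the load/shufflevector shape mirrors the tests. The
<112 x i8> source is seven times the width of the <16 x i8> result, i.e. well
past the "more than twice the destination size" limit that createBuildVecShuffle
previously gave up on.

; Illustrative only. Try, e.g.: llc -mtriple=x86_64-- -mattr=+avx512bw < stride7.ll
define <16 x i8> @stride7_lane0(ptr %in.vec) {
  ; 112 interleaved bytes; keep every 7th byte starting at offset 0.
  %wide.vec = load <112 x i8>, ptr %in.vec, align 64
  %lane0 = shufflevector <112 x i8> %wide.vec, <112 x i8> poison, <16 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105>
  ret <16 x i8> %lane0
}
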
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index b7619df82d388..d3e8b9f2b21b0 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -1475,911 +1475,199 @@ define void @load_i8_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i8_stride7_vf16:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,7,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm2, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm1, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm1, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm0, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm0, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %r11d, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm0, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %r11d, %xmm4, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm4, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %r11d, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm4, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %r11d, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm6, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %r11d, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm6, %r11d
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %r11d, %xmm5, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[1,8,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm9
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm10
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vmovd %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm11
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm12
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm3, %r11d
-; AVX512F-SLOW-NEXT:    vmovd %r11d, %xmm3
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, (%r10)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i8_stride7_vf16:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,7,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm2, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $4, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm1, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $5, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm1, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $6, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm0, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $7, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm0, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $8, %r11d, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm0, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $9, %r11d, %xmm4, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm4, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $10, %r11d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm4, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $11, %r11d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm6, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $12, %r11d, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm6, %r11d
-; AVX512F-FAST-NEXT:    vpinsrb $13, %r11d, %xmm5, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[1,8,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[2,9],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[3,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[4,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm3[5,12],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[6,13],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm12, (%r10)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i8_stride7_vf16:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,7,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm2, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm1, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm1, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm0, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm0, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %r11d, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm0, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %r11d, %xmm4, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm4, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %r11d, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm4, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %r11d, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm6, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %r11d, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm6, %r11d
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %r11d, %xmm5, %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[1,8,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm9
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovd %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm11
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm12
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm3, %r11d
-; AVX512BW-SLOW-NEXT:    vmovd %r11d, %xmm3
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm12, (%r10)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    retq
+; AVX512F-LABEL: load_i8_stride7_vf16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm5
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm7 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm7[2,9]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
+; AVX512F-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm8, %ymm6
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512F-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm7, %xmm6, %xmm9
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0],xmm3[1],xmm4[2,3,4],xmm3[5],xmm4[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm6[3,10]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero
+; AVX512F-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $184, %xmm9, %xmm7, %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm10[2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[0,7,14],zero,zero,xmm10[3,10,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm11, %xmm10, %xmm10
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm11[4,11]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm0[6,13],zero,zero
+; AVX512F-NEXT:    vpor %xmm12, %xmm11, %xmm11
+; AVX512F-NEXT:    vpternlogq $184, %xmm10, %xmm7, %xmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm10, %ymm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm12
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm13, %xmm12, %xmm12
+; AVX512F-NEXT:    vpblendd {{.*#+}} xmm13 = xmm4[0],xmm3[1],xmm4[2],xmm3[3]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm13[5,12]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm15 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero
+; AVX512F-NEXT:    vpor %xmm15, %xmm14, %xmm14
+; AVX512F-NEXT:    vpternlogq $184, %xmm12, %xmm7, %xmm14
+; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm8
+; AVX512F-NEXT:    vextracti128 $1, %ymm8, %xmm12
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm12, %xmm8, %xmm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm13[6,13]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero
+; AVX512F-NEXT:    vpor %xmm13, %xmm12, %xmm12
+; AVX512F-NEXT:    vpternlogq $184, %xmm8, %xmm7, %xmm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm9
+; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[3,10],zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5,6],xmm3[7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm9[0,7,14]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[2,9],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm13, %xmm9, %xmm9
+; AVX512F-NEXT:    vpternlogq $184, %xmm8, %xmm7, %xmm9
+; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm10[6,13],zero,zero,xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u],zero,zero,xmm0[3,10],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpternlogq $184, %xmm1, %xmm7, %xmm0
+; AVX512F-NEXT:    vmovdqa %xmm5, (%rsi)
+; AVX512F-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512F-NEXT:    vmovdqa %xmm11, (%rcx)
+; AVX512F-NEXT:    vmovdqa %xmm14, (%r8)
+; AVX512F-NEXT:    vmovdqa %xmm12, (%r9)
+; AVX512F-NEXT:    vmovdqa %xmm9, (%r10)
+; AVX512F-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
-; AVX512BW-FAST-LABEL: load_i8_stride7_vf16:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm2, %r11d
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[0,7,14],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm2, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm1, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm1, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm0, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm0, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %r11d, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm0, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %r11d, %xmm4, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm4, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %r11d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm4, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %r11d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm6, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %r11d, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm6, %r11d
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %r11d, %xmm5, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[1,8,15],zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[2,9],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[3,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vmovd %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[4,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vmovd %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm3[5,12],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[6,13],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512BW-FAST-NEXT:    vmovd %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm7, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm10, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm11, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm12, (%r10)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride7_vf16:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-NEXT:    vmovdqa 96(%rdi), %xmm0
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9]
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512BW-NEXT:    movw $-28382, %r11w # imm = 0x9122
+; AVX512BW-NEXT:    kmovd %r11d, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 80(%rdi), %xmm5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX512BW-NEXT:    movw $4644, %di # imm = 0x1224
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm6 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,8,15],zero,zero,xmm6[4,11],zero,zero,xmm6[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2,3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,7,14],zero,zero,xmm7[3,10]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero
+; AVX512BW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-NEXT:    movw $-512, %di # imm = 0xFE00
+; AVX512BW-NEXT:    kmovd %edi, %k1
+; AVX512BW-NEXT:    vmovdqu8 %xmm7, %xmm6 {%k1}
+; AVX512BW-NEXT:    movw $8772, %di # imm = 0x2244
+; AVX512BW-NEXT:    kmovd %edi, %k3
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm7 {%k3}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[2,9],zero,zero,zero,xmm7[5,12],zero,zero,xmm7[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,xmm7[0,7,14],zero,zero,xmm7[3,10,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,1,8,15],zero,zero,xmm8[4,11]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero
+; AVX512BW-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512BW-NEXT:    vmovdqu8 %xmm8, %xmm7 {%k1}
+; AVX512BW-NEXT:    movw $9288, %di # imm = 0x2448
+; AVX512BW-NEXT:    kmovd %edi, %k4
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm8 {%k4}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm9[5,12]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512BW-NEXT:    vpor %xmm11, %xmm10, %xmm10
+; AVX512BW-NEXT:    vmovdqu8 %xmm10, %xmm8 {%k1}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm11, %xmm10, %xmm10
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm9[6,13]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15],zero,zero
+; AVX512BW-NEXT:    vpor %xmm11, %xmm9, %xmm9
+; AVX512BW-NEXT:    vmovdqu8 %xmm9, %xmm10 {%k1}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm9 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm11
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[5,12],zero,zero,xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm11, %xmm9, %xmm9
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6],xmm0[7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,4,11],zero,zero,xmm11[0,7,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm12, %xmm11, %xmm11
+; AVX512BW-NEXT:    vmovdqu8 %xmm11, %xmm9 {%k1}
+; AVX512BW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k4}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT:    vmovdqu8 %xmm0, %xmm2 {%k1}
+; AVX512BW-NEXT:    vmovdqa %xmm4, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %xmm7, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512BW-NEXT:    vmovdqa %xmm10, (%r9)
+; AVX512BW-NEXT:    vmovdqa %xmm9, (%r10)
+; AVX512BW-NEXT:    vmovdqa %xmm2, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <112 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <112 x i8> %wide.vec, <112 x i8> poison, <16 x i32> <i32 0, i32 7, i32 14, i32 21, i32 28, i32 35, i32 42, i32 49, i32 56, i32 63, i32 70, i32 77, i32 84, i32 91, i32 98, i32 105>
   %strided.vec1 = shufflevector <112 x i8> %wide.vec, <112 x i8> poison, <16 x i32> <i32 1, i32 8, i32 15, i32 22, i32 29, i32 36, i32 43, i32 50, i32 57, i32 64, i32 71, i32 78, i32 85, i32 92, i32 99, i32 106>

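(For orientation: every test function in these interleaved-load files has the same shape as the IR context above, a single wide load followed by one strided shufflevector per output vector. Below is a minimal stride-4 sketch, illustrative only and not part of this patch; the function and value names are hypothetical, patterned after the tests' conventions, and each destination here is a quarter the width of the source. It is self-contained IR that can be compiled standalone with, e.g., llc -mtriple=x86_64.

; Load 32 interleaved bytes, then de-interleave into four 8-byte lanes.
define void @load_i8_stride4_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr %out.vec2, ptr %out.vec3) {
  %wide.vec = load <32 x i8>, ptr %in.vec, align 32
  ; Lane k gathers elements k, k+4, k+8, ... of the wide vector.
  %strided.vec0 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %strided.vec1 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %strided.vec2 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %strided.vec3 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  store <8 x i8> %strided.vec0, ptr %out.vec0, align 8
  store <8 x i8> %strided.vec1, ptr %out.vec1, align 8
  store <8 x i8> %strided.vec2, ptr %out.vec2, align 8
  store <8 x i8> %strided.vec3, ptr %out.vec3, align 8
  ret void
}
)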
diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
index abcd453623693..9cbb3fea50c7d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
@@ -1580,1075 +1580,121 @@ define void @load_i8_stride8_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm1, (%rax)
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i8_stride8_vf16:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    pushq %rbx
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512F-SLOW-NEXT:    vpmovqb %xmm3, %xmm4
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %ebx, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm2, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %ebx, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovd %xmm1, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %ebx, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm1, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %ebx, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovd %xmm0, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %ebx, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm0, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %ebx, %xmm4, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vmovd %xmm4, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %ebx, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm4, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %ebx, %xmm5, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovd %xmm5, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %ebx, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm5, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %ebx, %xmm6, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %ebx, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm6, %ebx
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %ebx, %xmm7, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vmovd %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $8, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm9
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $1, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $9, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm10
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $2, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $10, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm11
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $3, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $11, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm12
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $4, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $12, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm13
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $5, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $13, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm14
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $6, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $14, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm3, %edi
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm3, %ebx
-; AVX512F-SLOW-NEXT:    vmovd %ebx, %xmm3
-; AVX512F-SLOW-NEXT:    vpinsrb $1, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $7, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpextrb $15, %xmm7, %edi
-; AVX512F-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-SLOW-NEXT:    popq %rbx
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i8_stride8_vf16:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    pushq %rbx
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovd %xmm2, %ebx
-; AVX512F-FAST-NEXT:    vpmovqb %xmm3, %xmm4
-; AVX512F-FAST-NEXT:    vpinsrb $2, %ebx, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm2, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $3, %ebx, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovd %xmm1, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $4, %ebx, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm1, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $5, %ebx, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovd %xmm0, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $6, %ebx, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm0, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $7, %ebx, %xmm4, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vmovd %xmm4, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $8, %ebx, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm4, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $9, %ebx, %xmm5, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vmovd %xmm5, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $10, %ebx, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm5, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $11, %ebx, %xmm6, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vmovd %xmm6, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $12, %ebx, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm6, %ebx
-; AVX512F-FAST-NEXT:    vpinsrb $13, %ebx, %xmm7, %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vmovd %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $8, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[1,9],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $1, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $9, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[2,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $2, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $10, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[3,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $3, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $11, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm3[4,12],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $4, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $12, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm3[5,13],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $5, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $13, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm3[6,14],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $6, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $14, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[7,15],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $7, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpextrb $15, %xmm7, %edi
-; AVX512F-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512F-FAST-NEXT:    popq %rbx
-; AVX512F-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i8_stride8_vf16:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    pushq %rbx
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-SLOW-NEXT:    vmovd %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vpmovqb %xmm3, %xmm4
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm2, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm1, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm1, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %ebx, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm0, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %ebx, %xmm4, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vmovd %xmm4, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %ebx, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm4, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %ebx, %xmm5, %xmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vmovd %xmm5, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %ebx, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm5, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %ebx, %xmm6, %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vmovd %xmm6, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %ebx, %xmm7, %xmm7
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm6, %ebx
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %ebx, %xmm7, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vmovd %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $8, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm9
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $1, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $9, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm10
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $2, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $10, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm11
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $3, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $11, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm12
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $4, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $12, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm13
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $5, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $13, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm13, %xmm13
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm14
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $6, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $14, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm3, %edi
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm3, %ebx
-; AVX512BW-SLOW-NEXT:    vmovd %ebx, %xmm3
-; AVX512BW-SLOW-NEXT:    vpinsrb $1, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $7, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpextrb $15, %xmm7, %edi
-; AVX512BW-SLOW-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    popq %rbx
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i8_stride8_vf16:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    pushq %rbx
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-FAST-NEXT:    vmovd %xmm2, %ebx
-; AVX512BW-FAST-NEXT:    vpmovqb %xmm3, %xmm4
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm2, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm1, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm1, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm0, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %ebx, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm0, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %ebx, %xmm4, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vmovd %xmm4, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %ebx, %xmm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm4, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %ebx, %xmm5, %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512BW-FAST-NEXT:    vmovd %xmm5, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %ebx, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm5, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %ebx, %xmm6, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vmovd %xmm6, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %ebx, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm6, %ebx
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %ebx, %xmm7, %xmm8
-; AVX512BW-FAST-NEXT:    vmovdqa 112(%rdi), %xmm7
-; AVX512BW-FAST-NEXT:    vmovd %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $8, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm8, %xmm8
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[1,9],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $1, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $9, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[2,10],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $2, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $10, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[3,11],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $3, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $11, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm11, %xmm11
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm3[4,12],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $4, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $12, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm3[5,13],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $5, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $13, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm3[6,14],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $6, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $14, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[7,15],zero,zero,xmm3[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpinsrb $2, %edi, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm2, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $3, %edi, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $4, %edi, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm1, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $5, %edi, %xmm2, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $6, %edi, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm0, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $7, %edi, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm4, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $9, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm5, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $11, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $12, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm6, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $13, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $7, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $14, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpextrb $15, %xmm7, %edi
-; AVX512BW-FAST-NEXT:    vpinsrb $15, %edi, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm9, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm10, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm11, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm12, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm13, (%r11)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm14, (%r10)
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm0, (%rax)
-; AVX512BW-FAST-NEXT:    popq %rbx
-; AVX512BW-FAST-NEXT:    retq
+; AVX512-LABEL: load_i8_stride8_vf16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vmovdqa 112(%rdi), %xmm0
+; AVX512-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
+; AVX512-NEXT:    vmovdqa 96(%rdi), %xmm1
+; AVX512-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512-NEXT:    vpshufb %xmm5, %xmm2, %xmm6
+; AVX512-NEXT:    vmovdqa 64(%rdi), %xmm3
+; AVX512-NEXT:    vpshufb %xmm5, %xmm3, %xmm5
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
+; AVX512-NEXT:    vmovdqa64 (%rdi), %zmm5
+; AVX512-NEXT:    vpmovqb %zmm5, %xmm6
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm6, %xmm0, %xmm7
+; AVX512-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
+; AVX512-NEXT:    vpshufb %xmm7, %xmm3, %xmm7
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3]
+; AVX512-NEXT:    vpsrlq $8, %zmm5, %zmm7
+; AVX512-NEXT:    vpmovqb %zmm7, %xmm7
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
+; AVX512-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm8, %xmm2, %xmm9
+; AVX512-NEXT:    vpshufb %xmm8, %xmm3, %xmm8
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3]
+; AVX512-NEXT:    vpsrlq $16, %zmm5, %zmm8
+; AVX512-NEXT:    vpmovqb %zmm8, %xmm8
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm8, %xmm0, %xmm9
+; AVX512-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm9, %xmm2, %xmm10
+; AVX512-NEXT:    vpshufb %xmm9, %xmm3, %xmm9
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3]
+; AVX512-NEXT:    vpsrlq $24, %zmm5, %zmm9
+; AVX512-NEXT:    vpmovqb %zmm9, %xmm9
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm8 = xmm9[0,1],xmm8[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm9, %xmm0, %xmm10
+; AVX512-NEXT:    vpshufb %xmm9, %xmm1, %xmm9
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm10, %xmm2, %xmm11
+; AVX512-NEXT:    vpshufb %xmm10, %xmm3, %xmm10
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3]
+; AVX512-NEXT:    vpsrlq $32, %zmm5, %zmm10
+; AVX512-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm10, %xmm0, %xmm11
+; AVX512-NEXT:    vpshufb %xmm10, %xmm1, %xmm10
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm11, %xmm2, %xmm12
+; AVX512-NEXT:    vpshufb %xmm11, %xmm3, %xmm11
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3]
+; AVX512-NEXT:    vpsrlq $40, %zmm5, %zmm11
+; AVX512-NEXT:    vpmovqb %zmm11, %xmm11
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm11, %xmm0, %xmm12
+; AVX512-NEXT:    vpshufb %xmm11, %xmm1, %xmm11
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm12, %xmm2, %xmm13
+; AVX512-NEXT:    vpshufb %xmm12, %xmm3, %xmm12
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3]
+; AVX512-NEXT:    vpsrlq $48, %zmm5, %zmm12
+; AVX512-NEXT:    vpmovqb %zmm12, %xmm12
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX512-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512-NEXT:    vpsrlq $56, %zmm5, %zmm1
+; AVX512-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512-NEXT:    vmovdqa %xmm4, (%rsi)
+; AVX512-NEXT:    vmovdqa %xmm6, (%rdx)
+; AVX512-NEXT:    vmovdqa %xmm7, (%rcx)
+; AVX512-NEXT:    vmovdqa %xmm8, (%r8)
+; AVX512-NEXT:    vmovdqa %xmm9, (%r9)
+; AVX512-NEXT:    vmovdqa %xmm10, (%r11)
+; AVX512-NEXT:    vmovdqa %xmm11, (%r10)
+; AVX512-NEXT:    vmovdqa %xmm0, (%rax)
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
   %wide.vec = load <128 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <16 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120>
   %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <16 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121>
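
For context, the CHECK lines above all exercise one pattern: a 64-byte-aligned interleaved load whose stride-8 shufflevector results previously lowered to the long vpextrb/vpinsrb chains in the removed lines, and now lower to the vpshufb/vpunpcklwd/vpblendd shuffles plus vpmovqb in the added AVX512 block. A minimal, self-contained sketch of that IR (hypothetical function and output-pointer names; only the two result vectors visible above are shown, the real test extracts all eight) would be:

  define void @load_i8_stride8_vf16_sketch(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) {
    %wide.vec = load <128 x i8>, ptr %in.vec, align 64
    ; element 0 of every 8-byte group, i.e. the first strided result
    %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <16 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56, i32 64, i32 72, i32 80, i32 88, i32 96, i32 104, i32 112, i32 120>
    ; element 1 of every 8-byte group, i.e. the second strided result
    %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <16 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57, i32 65, i32 73, i32 81, i32 89, i32 97, i32 105, i32 113, i32 121>
    store <16 x i8> %strided.vec0, ptr %out.vec0, align 16
    store <16 x i8> %strided.vec1, ptr %out.vec1, align 16
    ret void
  }

Each BUILD_VECTOR produced for these results draws from a source vector (<128 x i8>, i.e. a 512-bit register under AVX512) more than twice as wide as the <16 x i8> destination, which is exactly the case the patched createBuildVecShuffle now turns into shuffles instead of per-element extracts and inserts.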