[llvm] 23d4254 - [X86] combineINSERT_SUBVECTOR - fold (insert_subvector X, (insert_subvector undef, Y, 0), Idx) -> (insert_subvector X, Y, Idx)

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sun Feb 5 05:07:47 PST 2023


Author: Simon Pilgrim
Date: 2023-02-05T13:07:20Z
New Revision: 23d4254a0f16b604cb1ea64d8def94623e04d283

URL: https://github.com/llvm/llvm-project/commit/23d4254a0f16b604cb1ea64d8def94623e04d283
DIFF: https://github.com/llvm/llvm-project/commit/23d4254a0f16b604cb1ea64d8def94623e04d283.diff

LOG: [X86] combineINSERT_SUBVECTOR - fold (insert_subvector X, (insert_subvector undef, Y, 0), Idx) -> (insert_subvector X, Y, Idx)

Helps some shuffle combines that do a poor job of peeking through (insert_subvector undef, Y, 0) vector widening patterns.
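
For context, a minimal standalone sketch of why the fold is sound (this models the semantics, it is not LLVM code; the lane counts, values, and insertSubvector helper are all illustrative). The lanes that the inner (insert_subvector undef, Y, 0) leaves undef may legally take any value, so letting them fall through to X's contents changes no defined lane:

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <optional>

// A "vector" of N lanes; nullopt models an undef lane.
template <std::size_t N> using Vec = std::array<std::optional<int>, N>;

// insert_subvector: overwrite W lanes of Dst starting at Idx with Src.
template <std::size_t N, std::size_t W>
Vec<N> insertSubvector(Vec<N> Dst, const Vec<W> &Src, std::size_t Idx) {
  for (std::size_t I = 0; I < W; ++I)
    Dst[Idx + I] = Src[I];
  return Dst;
}

int main() {
  Vec<8> X{1, 2, 3, 4, 5, 6, 7, 8}; // destination vector
  Vec<2> Y{42, 43};                 // subvector to insert

  // Before: widen Y into an undef 4-lane vector, then insert at Idx = 4.
  Vec<4> Widened = insertSubvector(Vec<4>{}, Y, 0); // lanes 2..3 stay undef
  Vec<8> Before = insertSubvector(X, Widened, 4);

  // After the fold: insert Y directly at Idx = 4.
  Vec<8> After = insertSubvector(X, Y, 4);

  // Every lane defined before the fold keeps its value; lanes 6..7 were
  // undef before, so giving them X's old contents is legal.
  for (std::size_t I = 0; I < 8; ++I)
    if (Before[I])
      assert(After[I] == Before[I]);
  std::puts("fold preserves all defined lanes");
}

This is also what shows up in the test diffs below: with the widening peeled away, a 128-bit insert such as vinserti32x4 $2, %xmm0, ... can replace a 256-bit vinserti64x4 $1, %ymm0, ... whose upper half carried only undef lanes.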

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
    llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
    llvm/test/CodeGen/X86/vector-shuffle-v192.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fffcea65a5713..7c9356254880a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55583,6 +55583,16 @@ static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   if (IsI1Vector)
     return SDValue();
 
+  // Eliminate an intermediate vector widening:
+  // insert_subvector X, (insert_subvector undef, Y, 0), Idx -->
+  // insert_subvector X, Y, Idx
+  // TODO: This is a more general version of a DAGCombiner fold, can we move it
+  // there?
+  if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
+    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
+                       SubVec.getOperand(1), N->getOperand(2));
+
   // If this is an insert of an extract, combine to a shuffle. Don't do this
   // if the insert or extract can be represented with a subregister operation.
   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&

diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index bb2e790362e48..c943ba53ae9bc 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -2626,7 +2626,7 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15]
 ; AVX512BW-NEXT:    vpbroadcastb %xmm0, %xmm0
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3688,7 +3688,7 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,25,26,0,28,29,0,31]
 ; AVX512BW-NEXT:    vpermi2w %ymm1, %ymm0, %ymm2
 ; AVX512BW-NEXT:    vpbroadcastw %xmm0, %xmm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -4450,7 +4450,7 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,0]
 ; AVX512BW-SLOW-NEXT:    vpermd %zmm0, %zmm1, %zmm1
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper

diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 6037b9a68c056..7f838ec95d645 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -2084,12 +2084,12 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ;
 ; AVX512BW-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16:
 ; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    vpbroadcastb (%rdi), %xmm0
-; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm1 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15]
-; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15]
+; AVX512BW-NEXT:    vpbroadcastb (%rdi), %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2962,26 +2962,26 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512F-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
 ; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm1
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],mem[1,2],xmm1[3],mem[4,5],xmm1[6],mem[7]
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rdx)
-; AVX512F-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rdx)
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8:
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vpbroadcastw (%rdi), %xmm0
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1,2],xmm0[3],mem[4,5],xmm0[6],mem[7]
 ; AVX512DQ-NEXT:    vpbroadcastw (%rdi), %xmm1
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],mem[1,2],xmm1[3],mem[4,5],xmm1[6],mem[7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
-; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rdx)
-; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rdx)
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
+; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rdx)
+; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rdx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index c32461f762fce..59ff5b4f8f45b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -4119,7 +4119,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5,6],ymm11[7]
@@ -4169,7 +4169,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3],xmm6[4,5],xmm2[6],xmm6[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm2, %zmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
@@ -4207,7 +4207,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
@@ -4257,7 +4257,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5,6],xmm8[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm8, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm30
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
@@ -4298,7 +4298,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm12[1],ymm0[2,3,4,5],ymm12[6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
@@ -4340,7 +4340,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4],xmm3[5],xmm7[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
@@ -4405,7 +4405,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm6[1],ymm12[2,3],ymm6[4],ymm12[5,6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
@@ -4454,7 +4454,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3],xmm7[4,5],xmm3[6],xmm7[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm3
@@ -4493,7 +4493,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
@@ -4540,7 +4540,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5,6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3],xmm5[4,5,6,7]
@@ -4575,62 +4575,62 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm10[0,1,2,3],xmm3[4],xmm10[5],xmm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3],xmm3[4],xmm9[5],xmm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3],xmm11[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm11[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm12[1],xmm0[2,3],xmm12[4],xmm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm23, %ymm15, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm23, %ymm12, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm10[0,1,2,3],xmm7[4],xmm10[5],xmm7[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5],ymm9[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1,2,3,4],ymm7[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0],ymm10[1],ymm12[2,3,4,5],ymm10[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm12[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2,3],xmm10[4],xmm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3],xmm11[4],xmm9[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm23, %ymm10, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm23, %ymm9, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
@@ -4683,7 +4683,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[3],xmm2[4,5],xmm8[6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm16
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm22
@@ -4735,7 +4735,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm15[3],xmm7[4,5],xmm15[6],xmm7[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm7, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
@@ -4774,7 +4774,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm16
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
@@ -4822,7 +4822,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5,6],xmm7[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm7
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
@@ -4860,7 +4860,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm9
@@ -4902,7 +4902,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[1,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
@@ -4948,11 +4948,10 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[2,1,0,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1],ymm13[2],ymm4[3,4],ymm13[5],ymm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm4
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm10
@@ -4962,7 +4961,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm16
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm4, %zmm16
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
@@ -4972,9 +4971,10 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm9[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm9, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm1[1],ymm12[2,3,4,5],ymm1[6],ymm12[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm9, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm27
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm2[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
@@ -5013,7 +5013,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm15[3],xmm10[4,5],xmm15[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm10, %zmm5
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm10, %zmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
@@ -5033,15 +5033,15 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm28
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm8[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm30
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,0,3]
@@ -5051,7 +5051,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,4]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
@@ -5062,7 +5062,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2],xmm0[3],xmm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm12[0,1],ymm9[2],ymm12[3],ymm9[4],ymm12[5,6],ymm9[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm14[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
@@ -5097,7 +5098,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
@@ -5117,14 +5118,14 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,2,2,2,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
@@ -5134,7 +5135,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm12[1],ymm9[2,3,4,5],ymm12[6],ymm9[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm13[2],ymm8[3,4],ymm13[5],ymm8[6,7]
@@ -5156,13 +5157,13 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3,4,5],ymm7[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm7[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm13
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm13[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm14[0,1,2,3,4],ymm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
@@ -5179,13 +5180,13 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
@@ -5193,11 +5194,11 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rdx)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm13
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-FAST-NEXT:    popq %rax
 ; AVX512DQ-FAST-NEXT:    vzeroupper
@@ -9290,37 +9291,37 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm17
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm17[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm12, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm10, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm22
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
@@ -9328,21 +9329,21 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 480(%rdi), %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm8[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -9351,9 +9352,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm1, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm29
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -9371,112 +9373,112 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0],ymm4[1],ymm1[2,3,4,5],ymm4[6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm31
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm7, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm7[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0,1,2],xmm11[3],xmm2[4,5],xmm11[6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm3, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm11[0,1,2],xmm13[3],xmm11[4,5],xmm13[6],xmm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm11[2,3],mem[2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,4,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm15, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm15, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm14, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1],xmm13[2],xmm14[3],xmm13[4,5],xmm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4,5],xmm3[6],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm8, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0,1],xmm0[2],xmm14[3],xmm0[4,5],xmm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm9, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm7, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm11, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm10, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -9490,10 +9492,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
@@ -9508,7 +9510,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
@@ -9536,8 +9538,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
@@ -9560,11 +9562,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
@@ -9577,10 +9579,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
@@ -9616,9 +9618,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm24, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
@@ -9629,7 +9631,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
@@ -9641,12 +9643,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm13, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2,3],xmm2[4],xmm12[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm13
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
@@ -9662,8 +9664,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm28, %zmm29, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm24, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
@@ -9671,7 +9673,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
@@ -9679,7 +9681,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
@@ -9712,7 +9714,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm30
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
@@ -9757,8 +9759,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[2,2,2,2,4,5,6,7]
@@ -9783,7 +9785,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm31
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm31 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
@@ -9815,7 +9817,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm0, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm19[1,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
@@ -9855,7 +9857,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm11[4],xmm9[5],xmm11[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm9, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm10, %zmm3 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
@@ -9910,18 +9912,18 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
 ; AVX512F-ONLY-FAST-NEXT:    subq $1512, %rsp # imm = 0x5E8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 544(%rdi), %ymm1
@@ -9929,25 +9931,25 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm15, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm14, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm1
@@ -9965,9 +9967,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
@@ -9992,39 +9994,39 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm10, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm10, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm11, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
@@ -10032,25 +10034,25 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm3[0,1,2],xmm12[3],xmm3[4,5],xmm12[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0,1,2],xmm9[3],xmm3[4,5],xmm9[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
@@ -10059,13 +10061,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm5, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm11, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
@@ -10079,7 +10081,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
@@ -10088,11 +10090,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6,7]
@@ -10102,7 +10104,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
@@ -10120,7 +10122,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
@@ -10132,7 +10134,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
@@ -10166,7 +10168,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6],xmm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm7
@@ -10201,10 +10203,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,3,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
@@ -10251,7 +10253,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm14, %zmm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm15
@@ -10295,7 +10297,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm8, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
@@ -10315,171 +10317,171 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm29, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm25, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm13 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm10 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm3, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm3, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm30 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, (%rsp), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm14 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm2, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm31 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm31 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm7 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm8[4],xmm0[5],xmm8[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm11, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm10, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3],xmm8[4],xmm11[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm11, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm11[1],xmm10[2,3],xmm11[4],xmm10[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0,1,2,3],xmm1[4],xmm8[5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm14
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm15, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm14
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0,1,2,3],xmm5[4],xmm9[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
@@ -10510,21 +10512,21 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm4, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm4, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 64(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, (%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 64(%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, (%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 64(%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    addq $1512, %rsp # imm = 0x5E8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
@@ -10539,9 +10541,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm25[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm24[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
@@ -10552,22 +10554,21 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm23
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm2
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm3, %xmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm15, %xmm2
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm21
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm21[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
@@ -10584,10 +10585,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[2,2,2,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
@@ -10598,8 +10599,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm2, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
@@ -10624,110 +10626,110 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0],ymm1[1],ymm3[2,3,4,5],ymm1[6],ymm3[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm3[3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm7[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0,1,2],xmm10[3],xmm2[4,5],xmm10[6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm10[0,1,2],xmm12[3],xmm10[4,5],xmm12[6],xmm10[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm10
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm10[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm28
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm12
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm16, %zmm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm10, %zmm0, %zmm6 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm14, %xmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm14, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm12[2],xmm13[3],xmm12[4,5],xmm13[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4,5],xmm3[6],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm5, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm6[2],xmm1[3],xmm6[4,5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2],ymm5[3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm0[2],xmm13[3],xmm0[4,5],xmm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm8, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm9, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm5[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm5
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2],ymm2[3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2],ymm1[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
@@ -10761,7 +10763,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
@@ -10772,14 +10774,14 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1],ymm0[2],ymm3[3],ymm0[4],ymm3[5,6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm17
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
@@ -10789,34 +10791,35 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,6,5,6,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,1,2,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
@@ -10829,7 +10832,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm19
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
@@ -10859,15 +10862,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm28
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm28
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm2
@@ -10881,33 +10884,33 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm13, %ymm13
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2,3],xmm2[4],xmm12[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm13
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm19
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm27
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
@@ -10915,7 +10918,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
@@ -10923,7 +10926,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
@@ -10934,7 +10937,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
@@ -10949,14 +10952,14 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm5, %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm22
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm22
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
@@ -10976,7 +10979,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    movw $31, %ax
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm22 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm4[1],ymm0[2,3,4,5],ymm4[6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -10985,7 +10988,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm16
@@ -10994,7 +10997,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm17
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm24
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -11026,7 +11029,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm25
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm25
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm25 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
@@ -11051,14 +11054,14 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm3[0],xmm11[1],xmm3[2,3],xmm11[4],xmm3[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm13
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm26
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm26
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm30[1,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
@@ -11070,12 +11073,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm8, %zmm0, %zmm26 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0,1,2,3],xmm8[4],xmm13[5],xmm8[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
@@ -11095,7 +11098,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm6, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm11, %zmm0, %zmm1 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
@@ -11113,15 +11116,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, 64(%rdx)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm27, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm25, %zmm27, %zmm23
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm26, %zmm27, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm27, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm3, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm25, %zmm3, %zmm23
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm26, %zmm3, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm3, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, 64(%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm1, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 64(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm27, (%r8)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 64(%r9)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, (%r9)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -11153,16 +11157,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
@@ -11189,9 +11193,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm19
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 640(%rdi), %ymm2
@@ -11218,9 +11222,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
@@ -11229,12 +11233,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm13
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
@@ -11258,10 +11262,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm13
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm3[3],xmm12[4,5],xmm3[6],xmm12[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0,1,2],xmm13[3],xmm3[4,5],xmm13[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
@@ -11270,26 +11274,26 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm17, %zmm16, %zmm6
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm6
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm5, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm7
@@ -11307,9 +11311,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm7 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
@@ -11317,12 +11321,12 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm15, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm3[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
@@ -11361,7 +11365,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
@@ -11430,7 +11434,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,6,5,6,4]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm19
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm19
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
@@ -11480,7 +11484,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm20
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm14, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm15
@@ -11516,7 +11520,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm8, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
@@ -11536,13 +11540,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm5
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm27, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm12 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm10 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
@@ -11556,124 +11560,124 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm22
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm3, %ymm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    movw $31, %ax
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm22 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $146, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm22 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $146, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm27
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm5, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm14 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm0
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm17
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm13
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm10
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm24
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm24 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm24
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm3, %zmm0, %zmm24 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm6 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
+; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3],xmm8[4],xmm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0],xmm9[1],xmm11[2,3],xmm9[4],xmm11[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm11, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm11[1],xmm10[2,3],xmm11[4],xmm10[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0,1,2,3],xmm4[4],xmm15[5],xmm4[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm26
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm8, %zmm0, %zmm26 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm26
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm9, %zmm0, %zmm26 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0,1,2,3],xmm4[4],xmm8[5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm9
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4],xmm9[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm15
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3,4],ymm4[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
@@ -11682,20 +11686,20 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm15, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm6, %zmm1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index c9d085de3a5b6..abe5aa583f962 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -5714,64 +5714,64 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride7_vf32:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $72, %rsp
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    pushq %rax
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm1[1],ymm3[2,3,4],ymm1[5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm1[1],ymm4[2,3,4],ymm1[5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm2[2],ymm5[3,4,5],ymm2[6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,6,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm19[0,1,0,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm18[0,1,0,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[0,1,2,1,4,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 252(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
+; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm22 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm15 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm4[2],ymm10[3,4],ymm4[5],ymm10[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7,8,9,10],ymm1[11],ymm2[12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[8,9,6,7,4,5,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2],ymm12[3],ymm7[4,5],ymm12[6],ymm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm9[2],ymm8[3,4,5],ymm9[6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm8[2],ymm7[3,4,5],ymm8[6],ymm7[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
@@ -5779,624 +5779,605 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm14[1],xmm6[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm13[1],xmm5[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm4[2,3],ymm10[4,5],ymm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[10,11,8,9,6,7,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm7[1],ymm12[2,3],ymm7[4],ymm12[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm8[3],ymm7[4,5],ymm8[6],ymm7[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm19[0,1,1,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm18[0,1,1,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm4[3],ymm10[4,5],ymm4[6],ymm10[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm25
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm7[1],ymm12[2,3,4],ymm7[5],ymm12[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3,4],ymm11[5],ymm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm19[0,1,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm6, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm5, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7,4,5,2,3,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,5],xmm2[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7],ymm0[8,9,10,11,12],ymm3[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm14, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm14, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm26
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5],xmm14[6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7],ymm0[8,9,10,11,12],ymm2[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm13, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm13, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm13
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2],ymm14[3],ymm0[4,5],ymm14[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7,8,9,10,11,12,13],ymm3[14],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[0,2,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,1,14,15,12,13,10,11,4,5,6,7,8,9,2,3,16,17,30,31,28,29,26,27,20,21,22,23,24,25,18,19]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2],xmm3[3,4,5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm15[2],ymm11[3,4],ymm15[5],ymm11[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm9[3],ymm6[4,5],ymm9[6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6],ymm3[7,8,9,10,11,12,13],ymm4[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[0,2,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,14,15,12,13,10,11,4,5,6,7,8,9,2,3,16,17,30,31,28,29,26,27,20,21,22,23,24,25,18,19]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1,2],xmm4[3,4,5,6],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm5[2],ymm12[3,4],ymm5[5],ymm12[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,3,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6],ymm4[7,8],ymm3[9,10,11,12,13,14],ymm4[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[2,3,0,1,14,15,12,13,4,5,4,5,4,5,4,5,18,19,16,17,30,31,28,29,20,21,20,21,20,21,20,21]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4,5,6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm15[2,3],ymm11[4,5],ymm15[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm0[1],ymm14[2,3],ymm0[4],ymm14[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm4[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0],ymm4[1,2,3,4,5,6],ymm15[7,8],ymm4[9,10,11,12,13,14],ymm15[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[2,3,0,1,14,15,12,13,4,5,4,5,4,5,4,5,18,19,16,17,30,31,28,29,20,21,20,21,20,21,20,21]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4,5,6],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm5[2,3],ymm12[4,5],ymm5[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm17
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0],ymm0[1],ymm14[2,3,4],ymm0[5],ymm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm3[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,7,20,21,u,u,16,17,30,31,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm15[1],ymm3[2,3,4,5,6,7,8],ymm15[9],ymm3[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0,1],ymm11[2],ymm13[3,4,5],ymm11[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm11, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm15[0,1,2,3],xmm11[4],xmm15[5],xmm11[6],xmm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm12[0,1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm11[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm11[1],xmm3[2,3,4,5],xmm11[6],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,1,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm20[0,1,2,0,4,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,7,4,8,9,10,11,12,13,15,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm11[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm0[0,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm3, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1,2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0],xmm3[1],xmm13[2,3,4,5],xmm3[6],xmm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm12[2],ymm5[3,4,5],ymm12[6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0,1,2,3],xmm10[4],xmm13[5],xmm10[6],xmm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1],ymm4[2],ymm14[3,4],ymm4[5],ymm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[1,1,2,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm3, %ymm13, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,6,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0,1,2],ymm10[3,4,5,6,7],ymm13[8,9,10],ymm10[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm0[1],ymm13[2,3,4],ymm0[5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,7,20,21,u,u,16,17,30,31,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6,7,8],ymm3[9],ymm2[10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1],ymm6[2],ymm9[3,4,5],ymm6[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm10[4],xmm3[5],xmm10[6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2],ymm15[3],ymm11[4,5],ymm15[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4],xmm8[5],xmm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[8,9,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1],xmm2[2,3,4,5],xmm10[6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,1,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm12[0,1,2,0,4,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,7,4,8,9,10,11,12,13,15,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm0[0,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm2, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1,2],ymm6[3],ymm9[4,5],ymm6[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0],xmm2[1],xmm12[2,3,4,5],xmm2[6],xmm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm11[2],ymm15[3,4,5],ymm11[6],ymm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm12[0,1,2,3],xmm9[4],xmm12[5],xmm9[6],xmm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm3[2],ymm13[3,4],ymm3[5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,2,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,6,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3,4,5,6,7],ymm12[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3],xmm8[4],xmm7[5],xmm8[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[8,9,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm1[2],ymm11[3,4,5],ymm1[6],ymm11[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm2[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm8[6],ymm2[7,8,9,10,11,12,13],ymm8[14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1,2,3,4],ymm1[5,6,7],ymm7[8,9,10,11,12],ymm1[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm1, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4,5],ymm0[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6],ymm1[7,8,9,10,11,12,13],ymm7[14],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm10[2],ymm8[3,4],ymm10[5],ymm8[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2],ymm12[3],ymm5[4,5],ymm12[6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1],ymm4[2,3],ymm14[4,5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm14[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3],ymm9[4,5,6,7,8,9,10],ymm10[11],ymm9[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0],ymm6[1],ymm0[2,3],ymm6[4],ymm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm13[1],xmm10[2,3,4,5],xmm13[6],xmm10[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1,2],ymm11[3],ymm15[4,5],ymm11[6],ymm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3,4,5],xmm7[6],xmm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1],ymm3[2,3],ymm13[4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm13[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1,2],ymm12[3],ymm9[4,5,6,7,8,9,10],ymm12[11],ymm9[12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[10,11,24,25,22,23,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7],ymm9[8,9,10],ymm8[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0],ymm5[1],ymm12[2,3],ymm5[4],ymm12[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3,4,5],xmm10[6],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2],ymm4[3],ymm14[4,5],ymm4[6],ymm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0,1,2],ymm7[3,4,5,6,7],ymm9[8,9,10],ymm7[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm9, %ymm12, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm15[1],ymm11[2,3],ymm15[4],ymm11[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm12[1],xmm7[2,3,4,5],xmm12[6],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm12[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm14[4],ymm12[5,6,7,8,9,10,11],ymm14[12],ymm12[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm10[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4],ymm10[5,6,7,8,9,10,11],ymm13[12],ymm10[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0],ymm6[1],ymm0[2,3,4],ymm6[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm13[1],xmm6[2],xmm13[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[12,13,26,27,24,25,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7],ymm10[8,9,10],ymm8[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm6, %ymm10, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0,1,2],ymm1[3],ymm11[4,5],ymm1[6],ymm11[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm8[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0],ymm8[1,2,3,4,5,6],ymm10[7,8],ymm8[9,10,11,12,13,14],ymm10[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0,1],ymm7[2,3],ymm2[4,5],ymm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0],ymm11[1],ymm1[2,3],ymm11[4],ymm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm10[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1],ymm10[2,3,4,5,6,7,8],ymm13[9],ymm10[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1,2],ymm7[3],ymm2[4,5],ymm7[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm13, %xmm20
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm20[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm13[4],xmm7[4],xmm13[5],xmm7[5],xmm13[6],xmm7[6],xmm13[7],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm14[1],xmm5[2],xmm14[3],xmm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[12,13,26,27,24,25,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0,1,2],ymm7[3,4,5,6,7],ymm12[8,9,10],ymm7[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm12, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0],ymm7[1,2,3,4,5,6],ymm12[7,8],ymm7[9,10,11,12,13,14],ymm12[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1],ymm10[2,3],ymm8[4,5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm12[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3,4,5,6,7,8],ymm14[9],ymm12[10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm5[1],ymm12[2,3,4],ymm5[5],ymm12[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2],xmm5[3],xmm10[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,6,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm12[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm8[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0],ymm15[1],ymm11[2,3,4],ymm15[5],ymm11[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2],xmm8[3],xmm11[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2],ymm0[3,4,5],ymm14[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4,5],ymm13[6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,3,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm10[1,2],ymm0[3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm6[1,2],ymm0[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm23 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm5, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm5, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm25 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm6, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm6, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm24 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm15, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm5, %zmm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm27 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm5, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm9, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, (%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm24 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm6, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm26 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm18, %zmm6, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm9, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, (%rsi)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm27, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm5, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, (%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm26, %zmm5, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm6, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm25, %zmm7 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $72, %rsp
+; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm6, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    popq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride7_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $136, %rsp
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [2,5,9,12,2,5,9,12]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [10,3,6,15,12,13,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [3,6,10,13,3,6,10,13]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <1,u,u,u,5,8,12,15>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [2,5,9,12,2,5,9,12]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [10,3,6,15,12,13,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [3,6,10,13,3,6,10,13]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <1,u,u,u,5,8,12,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <2,6,9,u,13,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm1, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm1, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <1,u,u,u,4,8,11,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm0, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <2,5,9,u,12,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm19, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,u,u,u,4,7,11,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm18, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,u,u,u,4,7,11,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,1,12,5,12,5,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm23, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm29[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm22, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm28[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm6[0,1],ymm8[2],ymm6[3,4,5],ymm8[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm11[4],xmm9[5],xmm11[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm5, %ymm9, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3],xmm14[4],xmm5[5],xmm14[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2],xmm5[3,4,5,6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5,6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm13[4],xmm10[5],xmm13[6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm9, %ymm10, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
+; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
+; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm26 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1,2],xmm13[3],xmm14[4],xmm13[5],xmm14[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm0[0,1,2],xmm13[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3,4,5],xmm3[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2],ymm5[3,4,5],ymm3[6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4,5],ymm3[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm14[4],xmm0[5],xmm14[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm15
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm10[0],xmm1[1],xmm10[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm9[3],ymm11[4,5],ymm9[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm15[0],xmm10[1],xmm15[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2,3],xmm0[4],xmm11[5],xmm0[6],xmm11[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3,4,5,6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm12[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2],xmm0[3,4,5,6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0],ymm6[1],ymm8[2,3],ymm6[4],ymm8[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm4, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3,4,5],xmm0[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1],xmm2[2,3,4,5],xmm7[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm2, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5],xmm0[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm11[2],ymm9[3,4,5],ymm11[6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [21474836482,21474836482,21474836482,21474836482]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm9[2],ymm8[3,4,5],ymm9[6],ymm8[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm15, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm24, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm23, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm22, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm6[1],ymm8[2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm20, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm19, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm7, %ymm0, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm29[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm28[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm7, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1],ymm14[2],ymm7[3,4,5],ymm14[6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm12[4],xmm4[5],xmm12[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm9[0,1,2],ymm11[3],ymm9[4,5],ymm11[6],ymm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0],xmm12[1],xmm15[2,3,4,5],xmm12[6],xmm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,11,2,11,12,5,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm15, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0,1,2],ymm4[3,4,5,6,7],ymm15[8,9,10],ymm4[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm13[2],ymm7[3,4,5],ymm13[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm12[4],xmm2[5],xmm12[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2,3,4,5],xmm12[6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,11,2,11,12,5,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm14, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0,1,2],ymm2[3,4,5,6,7],ymm14[8,9,10],ymm2[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm15, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm14, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3],xmm4[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5,6],ymm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm10, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm15, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm19, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <0,3,7,10,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,3,3,u,0,3,7,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm15, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm18, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <0,3,7,10,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,3,3,u,0,3,7,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,0,1,6,7,8,9,14,15,u,u,u,u,u,u,16,17,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm15[5,6,7],ymm4[8,9,10,11,12],ymm15[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm2, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm4, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm14[5,6,7],ymm4[8,9,10,11,12],ymm14[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm10, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm14, %zmm4, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm14[3],ymm7[4,5],ymm14[6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0],xmm0[1],xmm13[2,3,4,5],xmm0[6],xmm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1],xmm13[2,3,4,5],xmm12[6],xmm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <2,u,u,u,6,9,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm13, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7],ymm13[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm13, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm1[2,3],ymm8[4,5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,4,7,11,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm13, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm13[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0],ymm7[1],ymm14[2,3],ymm7[4],ymm14[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2,3,4,5],xmm15[6],xmm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [0,4,7,0,0,4,7,0]
-; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm25, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm12[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm4, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0],xmm2[1],xmm12[2,3,4,5],xmm2[6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,u,u,u,6,9,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7],ymm12[8,9,10],ymm11[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm12, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,6,9,13,2,6,9,13]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,4,7,11,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm16, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0],ymm7[1],ymm13[2,3],ymm7[4],ymm13[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm12[1],xmm0[2,3,4,5],xmm12[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,u,u,u,u,u,u,20,21,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,u,u,u,6,10,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0],ymm9[1],ymm11[2,3,4],ymm9[5],ymm11[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1],xmm4[2],xmm1[3],xmm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm4[0,1,2],ymm10[3,4,5,6,7],ymm4[8,9,10],ymm10[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <1,4,8,11,15,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm4, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm12[5,6,7],ymm1[8,9,10,11,12],ymm12[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <3,u,u,u,6,10,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm10[1],xmm15[2],xmm10[3],xmm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm12, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,8,11,15,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm7[1],ymm14[2,3,4],ymm7[5],ymm14[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,4,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0],ymm7[1],ymm13[2,3,4],ymm7[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,10,3,14,7,10,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1],ymm9[2],ymm11[3,4],ymm9[5],ymm11[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm6, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm6, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2],ymm6[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3,4,5,6,7],ymm6[8,9,10],ymm4[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    movw $992, %di # imm = 0x3E0
-; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm27 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm6, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm28, %zmm6, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm18 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm6, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm6, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %di # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm16, %zmm18 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm23 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm25, %zmm23 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm10, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, (%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm24, %zmm17 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm4, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm22 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm25, %zmm3 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm18, %zmm4, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, (%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, (%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm17, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $136, %rsp
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -6440,7 +6421,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512DQ-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm1, %zmm21 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm21 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
@@ -6470,7 +6451,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0],xmm11[1],xmm13[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm22
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm22
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
@@ -6494,7 +6475,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm24
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
@@ -6518,7 +6499,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpsrlq $48, %xmm13, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
@@ -6537,7 +6518,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm31
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm19
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm23
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm15
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm13
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
@@ -6615,7 +6596,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm17
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm0, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm10
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3,4,5],xmm0[6],xmm10[7]
@@ -6708,7 +6689,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm10
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1],ymm10[2,3],ymm7[4,5],ymm10[6,7]
@@ -6817,7 +6798,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm15[2],xmm9[2],xmm15[3],xmm9[3]
 ; AVX512DQ-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm9, %zmm8, %zmm23 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm23 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
@@ -6954,7 +6935,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm15, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <2,u,u,u,6,9,13,u>
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm6[3],ymm10[4,5],ymm6[6],ymm10[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -6981,7 +6962,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm12, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm13
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
@@ -12653,15 +12634,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 480(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
@@ -12670,321 +12653,327 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 700(%rdi), %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 672(%rdi), %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm17
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3,4],ymm6[5],ymm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm5[2],ymm4[3,4,5],ymm5[6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm17[0,1,0,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm22[0,1,0,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 252(%rdi), %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm6, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7,8,9,10],ymm0[11],ymm2[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,8,9,6,7,4,5,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3],ymm10[4,5],ymm4[6],ymm10[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1],xmm5[2,3,4,5],xmm4[6],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm5, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm6, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm7, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3],ymm5[4,5,6,7,8,9,10],ymm0[11],ymm5[12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,8,9,6,7,4,5,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm7, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm11[2],ymm12[3,4,5],ymm11[6],ymm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm2[2],ymm13[3,4,5],ymm2[6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2,3,4,5,6],ymm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm13[0],xmm3[1],xmm13[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,0,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm4[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5,6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm4[0],xmm3[1],xmm4[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,0,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 528(%rdi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3],ymm7[4,5,6,7,8,9,10],ymm6[11],ymm7[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 528(%rdi), %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3],ymm9[4,5,6,7,8,9,10],ymm8[11],ymm9[12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm8, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm11, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2],ymm11[3],ymm10[4,5],ymm11[6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm6, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm0[2],ymm11[3,4,5],ymm0[6],ymm11[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm21
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm18[0,1,0,2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm31[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 688(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm2[2],ymm11[3,4,5],ymm2[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 640(%rdi), %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm20[0,1,0,2]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm27[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 688(%rdi), %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm0[0],xmm2[1],xmm0[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,0,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm5, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm15, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4],ymm2[5,6,7,8,9,10,11],ymm4[12],ymm2[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4],ymm5[5,6,7,8,9,10,11],ymm6[12],ymm5[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm15
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm10[1],ymm1[2,3],ymm10[4],ymm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3,4,5],xmm5[6],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,10,11,8,9,6,7,20,21,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm6, %ymm4, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm4[1],xmm6[2,3,4,5],xmm4[6],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm17[0,1,1,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm13, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm24, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4],ymm6[5,6,7,8,9,10,11],ymm7[12],ymm6[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm8, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0],ymm15[1],ymm1[2,3],ymm15[4],ymm1[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm2, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,10,11,8,9,6,7,20,21,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm22[0,1,1,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,1,0,3,4,5,4,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,3,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1],ymm13[2,3],ymm14[4,5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm14, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4],ymm8[5,6,7,8,9,10,11],ymm9[12],ymm8[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm10, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1],xmm8[2,3,4,5],xmm9[6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm8, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm7, %ymm5, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm11, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2],ymm9[3],ymm11[4,5],ymm9[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2,3,4,5],xmm5[6],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm20[0,1,1,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,0,3,4,5,4,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm5, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3,4,5],xmm2[6],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm18[0,1,1,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm27
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[1,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm7, %ymm8, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm10[1],ymm1[2,3,4],ymm10[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[1,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm6, %ymm5, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0],ymm4[1],ymm12[2,3,4],ymm4[5],ymm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2],xmm7[3],xmm8[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm14[0,1,2],ymm13[3],ymm14[4,5],ymm13[6],ymm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[1,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm7, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm6, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0],ymm3[1],ymm9[2,3,4],ymm3[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0],ymm11[1],ymm14[2,3,4],ymm11[5],ymm14[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[1,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm9[1],ymm13[2,3,4],ymm9[5],ymm13[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm22[0,1,1,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm17[0,1,2,1,4,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm11, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm11, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm16, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm5, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0],ymm8[1],ymm11[2,3,4],ymm8[5],ymm11[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2],xmm2[3],xmm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm18[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm20[0,1,1,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[0,1,2,1,4,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 680(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm19, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm9[2],ymm13[3,4],ymm9[5],ymm13[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4],xmm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,8,9,6,7,4,5,2,3,2,3,2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm16[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 680(%rdi), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm3, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm3, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1],ymm11[2],ymm14[3,4],ymm11[5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,8,9,6,7,4,5,2,3,2,3,2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3,4,5],xmm1[6],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7],ymm5[8,9,10,11,12],ymm6[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm23, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3],xmm5[4],xmm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 656(%rdi), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,5],xmm2[6],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7],ymm6[8,9,10,11,12],ymm7[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm27, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0,1],ymm9[2,3],ymm13[4,5],ymm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4],xmm6[5],xmm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1],xmm1[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm6[0,1,2,3,4,5],xmm1[6],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7],ymm7[8,9,10,11,12],ymm8[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm18, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm10, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1],xmm7[2],xmm8[3],xmm7[4],xmm8[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 656(%rdi), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm7[0,1,2,3,4,5],xmm5[6],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7],ymm8[8,9,10,11,12],ymm9[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm21, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1],ymm4[2,3],ymm14[4,5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4],xmm8[5],xmm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0],xmm6[1],xmm1[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,9,10,11,8,9,6,7,4,5,4,5,4,5,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5,6,7],ymm6[8,9,10,11,12],ymm4[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5,6,7],ymm8[8,9,10,11,12],ymm6[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm6, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm15[3],ymm12[4,5],ymm15[6],ymm12[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm31[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm27[0,1,2,1,4,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm12[2,3],ymm11[4,5],ymm12[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[3],xmm3[4],xmm0[5],xmm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
+; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm15[2,3],ymm12[4,5],ymm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3],xmm2[4],xmm0[5],xmm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm5[1],xmm2[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm7[1],xmm5[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm1[2,3],ymm11[4,5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm11[2,3],ymm12[4,5],ymm11[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4],xmm0[5],xmm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 800(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 768(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 768(%rdi), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm3[3],ymm10[4,5],ymm3[6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7,8,9,10,11,12,13],ymm2[14],ymm1[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,1]
@@ -12994,8 +12983,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 832(%rdi), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 864(%rdi), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 864(%rdi), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm13[2],ymm5[3,4],ymm13[5],ymm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
@@ -13004,30 +12994,31 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2],ymm9[3],ymm15[4,5],ymm9[6],ymm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm4[3],ymm14[4,5],ymm4[6],ymm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm23
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1,2,1,4,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4],xmm0[5],xmm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2],ymm5[3],ymm14[4,5],ymm5[6],ymm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm7[3],ymm2[4,5],ymm7[6],ymm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6],ymm1[7,8,9,10,11,12,13],ymm4[14],ymm1[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
@@ -13038,6 +13029,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
@@ -13046,13 +13039,13 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm9[3],ymm2[4,5],ymm9[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm9[3],ymm14[4,5],ymm9[6],ymm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm14, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm14[1],ymm6[2,3],ymm14[4],ymm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm6, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2,3,4,5,6],ymm8[7,8],ymm1[9,10,11,12,13,14],ymm8[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
@@ -13061,11 +13054,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm30[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm27
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm27[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
@@ -13073,20 +13064,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm4[3],ymm11[4,5],ymm4[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0],ymm3[1],ymm15[2,3],ymm3[4],ymm15[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0],ymm10[1],ymm15[2,3],ymm10[4],ymm15[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2,3,4,5,6],ymm8[7,8],ymm1[9,10,11,12,13,14],ymm8[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm13[2,3],ymm2[4,5],ymm13[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,0,2,4,5,6,7]
@@ -13096,39 +13086,39 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0],ymm14[1],ymm10[2,3,4],ymm14[5],ymm10[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm14[1],ymm7[2,3,4],ymm14[5],ymm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,5,u,u,0,1,14,15,8,9,10,11,4,5,6,7,20,21,u,u,16,17,30,31,24,25,26,27,20,21,22,23>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3,4,5,6,7,8],ymm0[9],ymm8[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm5[2],ymm2[3,4,5],ymm5[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1],ymm4[2],ymm3[3,4,5],ymm4[6],ymm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $242, %ymm0, %ymm18, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $242, %ymm0, %ymm22, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm27
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm30[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm27[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm15, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0],ymm10[1],ymm15[2,3,4],ymm10[5],ymm15[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4,5,6,7,8],ymm0[9],ymm1[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm11[2],ymm4[3,4,5],ymm11[6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm12[2],ymm11[3,4,5],ymm12[6],ymm11[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $242, %ymm0, %ymm18, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm13[3],ymm12[4,5],ymm13[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $242, %ymm0, %ymm22, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm13[3],ymm2[4,5],ymm13[6],ymm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
@@ -13137,9 +13127,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,0,4,5,6,7]
@@ -13151,21 +13141,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # xmm1 = mem[0,1,2,3,6,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm5[3],ymm2[4,5],ymm5[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm7[2],ymm6[3,4,5],ymm7[6],ymm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm6, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm10, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1],ymm2[2],ymm10[3,4],ymm2[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm7[0,1],ymm2[2],ymm7[3,4],ymm2[5],ymm7[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[1,1,2,0]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
@@ -13176,9 +13161,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0,1,2],ymm1[3,4,5,6,7],ymm8[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,0,4,5,6,7]
@@ -13190,20 +13175,20 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # xmm1 = mem[0,1,2,3,6,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm11[3],ymm4[4,5],ymm11[6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm21
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1],ymm3[2],ymm14[3,4],ymm3[5],ymm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,2,0]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm12[2],ymm13[3,4,5],ymm12[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm14[2],ymm13[3,4,5],ymm14[6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm19
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
@@ -13215,18 +13200,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm0[2],ymm12[3,4,5],ymm0[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm12[2],ymm10[3,4,5],ymm12[6],ymm10[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <8,9,8,9,8,9,8,9,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm22
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6],ymm8[7,8,9,10,11,12,13],ymm0[14],ymm8[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm10[2],ymm1[3,4],ymm10[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm11[2],ymm13[3,4],ymm11[5],ymm13[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[2,1,2,3]
@@ -13234,18 +13217,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm6
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm24
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0],xmm0[1],xmm9[2,3,4,5],xmm0[6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm7, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm2[2,3],ymm7[4,5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm30
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm7[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1,2],ymm14[3],ymm9[4,5,6,7,8,9,10],ymm14[11],ymm9[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0],ymm1[1],ymm15[2,3],ymm1[4],ymm15[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
@@ -13258,39 +13242,42 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm9, %ymm14, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4,5],ymm11[6],ymm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4,5],ymm0[6],mem[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm0[6],ymm9[7,8,9,10,11,12,13],ymm0[14],ymm9[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm9[0,1],mem[2],ymm9[3,4],mem[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = ymm8[0,1],mem[2],ymm8[3,4],mem[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3,4,5],xmm0[6],xmm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm8[0,1],ymm2[2,3],ymm8[4,5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm22[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm20, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm7[0,1],ymm2[2,3],ymm7[4,5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm26[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm26, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3],ymm14[4,5,6,7,8,9,10],ymm15[11],ymm14[12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -13299,61 +13286,62 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm14, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3],ymm13[4,5],ymm0[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1,2],ymm0[3],mem[4,5],ymm0[6],mem[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3],ymm8[4],ymm1[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3,4,5],xmm14[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm8[0],ymm9[1],ymm8[2,3,4],ymm9[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3],xmm15[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,12,13,26,27,24,25,22,23,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm14, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm12[3],ymm10[4,5],ymm12[6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7,8],ymm0[9,10,11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm11[2,3],ymm13[4,5],ymm11[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [10,11,10,11,10,11,10,11,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [10,11,10,11,10,11,10,11,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm2[3],ymm7[4,5],ymm2[6],ymm7[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4],ymm0[5,6,7,8,9,10,11],ymm13[12],ymm0[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2,3,4,5],xmm15[6],xmm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm10[0],ymm3[1],ymm10[2,3,4],ymm3[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm29
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1],xmm12[2],xmm15[3],xmm12[4,5,6,7]
@@ -13363,13 +13351,15 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm12, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm11[3],ymm7[4,5],ymm11[6],ymm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm15 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm15[3],ymm14[4,5],ymm15[6],ymm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0],ymm0[1,2,3,4,5,6],ymm12[7,8],ymm0[9,10,11,12,13,14],ymm12[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm9[2,3],ymm1[4,5],ymm9[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,3,4,5,6,7]
@@ -13377,14 +13367,14 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0],ymm0[1],ymm10[2,3],ymm0[4],ymm10[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm12[1],ymm0[2,3,4,5,6,7,8],ymm12[9],ymm0[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm9[0,1,2],ymm2[3],ymm9[4,5],ymm2[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm11[0,1,2],ymm2[3],ymm11[4,5],ymm2[6],ymm11[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
@@ -13393,21 +13383,22 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [12,13,12,13,12,13,12,13,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3],xmm4[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,0,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4,5],ymm5[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4,5],mem[6],ymm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,3,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
@@ -13417,111 +13408,106 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm6[1],ymm3[2,3,4,5,6,7,8],ymm6[9],ymm3[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm3, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3],ymm15[4,5],ymm14[6],ymm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm14[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3,4,5,6,7,8],ymm15[9],ymm14[10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm14, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2],ymm9[3],ymm1[4,5],ymm9[6],ymm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm3[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm30, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2,3,4],ymm8[5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2],xmm3[3],xmm11[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1],ymm4[2],ymm10[3,4],ymm4[5],ymm10[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1],ymm13[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm27, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm8[2],ymm9[3,4,5],ymm8[6],ymm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm14 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm15 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
-; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm11, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm11, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0],ymm1[1],ymm9[2,3,4],ymm1[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm10[1],xmm13[2],xmm10[3],xmm13[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm10, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm1[2],ymm9[3,4],ymm1[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4,5],mem[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm1 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm16, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm11, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm14, %zmm11, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm15, %zmm11, %zmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,3,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm10, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm9 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm13, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm13, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,3,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm11, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm10[1,2],ymm5[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7],ymm5[8,9,10],ymm4[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm21 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm21 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm26 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm9 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm1 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm9 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm4 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm19 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm10, %zmm19 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm17 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm11, %zmm17 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm10, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm18 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm11, %zmm18 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm16 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm13, %zmm16 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm14, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 64(%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 64(%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 64(%rdx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, (%rdx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 64(%rcx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rcx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, 64(%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%r8)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm15, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm21, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 64(%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm15, %zmm22 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm21, %zmm8 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, 64(%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm0 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm23, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm26, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm0 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm0 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm0 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    addq $1800, %rsp # imm = 0x708
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
@@ -13529,427 +13515,439 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1864, %rsp # imm = 0x748
+; AVX512F-ONLY-FAST-NEXT:    subq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <2,5,9,u,12,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm16, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <2,5,9,u,12,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm19, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 672(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm31 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 480(%rdi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm5[2],ymm7[3,4,5],ymm5[6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm23[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm15, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 672(%rdi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm18[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm6[2],ymm12[3,4,5],ymm6[6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm6[3],ymm12[4,5],ymm6[6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3,4,5],xmm1[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm12[2],ymm6[3,4,5],ymm12[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm13, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm12[3],ymm7[4,5],ymm12[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm7, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm1[1],xmm5[2,3,4,5],xmm1[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm3[2],ymm11[3,4,5],ymm3[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm7[0,1,2,3,4,5,6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm13[0],xmm15[1],xmm13[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1],ymm10[2],ymm11[3,4,5],ymm10[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0],xmm13[1],xmm14[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2,3,4,5],xmm6[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm19, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm8[4],xmm4[5],xmm8[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm22[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0,1,2,3,4,5,6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm2[0],xmm10[1],xmm2[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <2,6,9,u,13,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm6[1],xmm5[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <2,6,9,u,13,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm16, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm3, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm8, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0,1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm8, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm27
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2,3,4,5],xmm4[6],xmm8[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm12 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm23, %ymm12, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm6[0,1,2,3,4,5,6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm13, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm16, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm5, %ymm9, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2],ymm3[3],ymm14[4,5],ymm3[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2,3,4,5],xmm5[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm22, %ymm12, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [21474836482,21474836482,21474836482,21474836482]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm11, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1,2,3,4,5,6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm14, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm9, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1],xmm8[2],xmm5[3],xmm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [10,3,6,15,12,13,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm16, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm12[0,1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3,4,5],xmm3[6],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm11, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm15[1],ymm0[2,3,4],ymm15[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0],xmm3[1],xmm8[2],xmm3[3],xmm8[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [10,3,6,15,12,13,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm20, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm5, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm9, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2,3,4,5],xmm9[6],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm11[1],ymm5[2,3],ymm11[4],ymm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm9[1],xmm3[2,3,4,5],xmm9[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm23[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm15[0,1,2,3,4,5,6],ymm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm18[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3,4,5,6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm10, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0],ymm11[1],ymm13[2,3,4],ymm11[5],ymm13[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm10[0],xmm6[1],xmm10[2],xmm6[3],xmm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm24, %zmm16, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm10, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm8, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm20, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm4, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm22[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm26[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm8[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm18, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [3,6,10,13,3,6,10,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm24, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5,6],ymm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2,3,4],ymm11[5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm19, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [3,6,10,13,3,6,10,13]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm25, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
+; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm16 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm19, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,3,3,3,0,3,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm23, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [2,5,9,12,2,5,9,12]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm13, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm28, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,3,3,3,0,3,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [2,5,9,12,2,5,9,12]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm8, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5,6,7],ymm9[8,9,10,11,12],ymm6[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm7, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm22, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm26
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm17 = [0,4,7,0,0,4,7,0]
-; AVX512F-ONLY-FAST-NEXT:    # ymm17 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm23, %ymm17, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm18, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7],ymm4[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0,1,2,3,4],ymm3[5,6,7],ymm7[8,9,10,11,12],ymm3[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm13, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm3, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm14[2,3],ymm12[4,5],ymm14[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3],xmm4[4],xmm0[5],xmm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,u,u,u,4,7,11,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[3,4,5,6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm13, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm0[2,3],ymm7[4,5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3],xmm7[4],xmm3[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,u,u,u,4,7,11,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm14, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm24, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm25, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm8, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm17, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3],xmm15[4],xmm0[5],xmm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3,4,5,6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm6, %zmm13, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [2,6,9,13,2,6,9,13]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm11, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6,7],ymm4[8,9,10,11,12],ymm2[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm8, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm1[2,3],ymm12[4,5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3],xmm7[4],xmm5[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm14, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3,4,5,6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm8, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1,2],ymm1[3],ymm12[4,5],ymm1[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <1,u,u,u,4,8,11,15>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm11, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3],xmm0[4],xmm7[5],xmm0[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <1,u,u,u,4,8,11,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm13, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm15, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3,4,5,6],xmm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm6, %zmm18, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm14[3],ymm12[4,5],ymm14[6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6],xmm15[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm13, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2],xmm0[3,4,5,6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm18, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm15, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3,4,5,6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm11, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm9[2],ymm10[3,4,5],ymm9[6],ymm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,u,u,u,5,8,12,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm7, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm8, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm6, %zmm24, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm12[2],ymm5[3,4,5],ymm12[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,u,u,u,5,8,12,15>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm6, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm25, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm12[2],ymm14[3,4,5],ymm12[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm24, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm2[2],ymm4[3,4,5],ymm2[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm25, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm22, %ymm17, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm10, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm11, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm11[2],ymm7[3,4,5],ymm11[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm13[2],ymm2[3,4,5],ymm13[6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm9[3],ymm10[4,5],ymm9[6],ymm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm12[3],ymm5[4,5],ymm12[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [2,11,2,11,12,5,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm13, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [2,11,2,11,12,5,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
@@ -13958,297 +13956,296 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 832(%rdi), %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm1[2],ymm8[3,4,5],ymm1[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm13, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm12[3],ymm3[4,5],ymm12[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm2[1],xmm13[2,3,4,5],xmm2[6],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm14[3],ymm4[4,5],ymm14[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0],xmm2[1],xmm9[2,3,4,5],xmm2[6],xmm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm25
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,3,7,10,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm18, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm13[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm19, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm11[3],ymm7[4,5],ymm11[6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5],xmm1[6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,3,7,10,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm19, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1],xmm9[2,3,4,5],xmm1[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm14[1],xmm9[2,3,4,5],xmm14[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,u,u,u,6,9,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <2,u,u,u,6,9,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm15, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm14, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm9, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm19, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm8, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm17, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm15, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1],xmm2[2,3,4,5],xmm14[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3,4,5],xmm9[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm11[2,3],ymm10[4,5],ymm11[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm30
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <0,4,7,11,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm7[1],ymm11[2,3],ymm7[4],ymm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2],xmm1[3],xmm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <3,u,u,u,6,10,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm18, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7],ymm13[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm13, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm12[2,3],ymm15[4,5],ymm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,4,7,11,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm14[1],xmm0[2,3,4,5],xmm14[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0],ymm12[1],ymm13[2,3,4],ymm12[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3],xmm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,u,u,u,6,10,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm10, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm14, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm17, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm15[1],xmm0[2,3,4,5],xmm15[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm18, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1],xmm12[2],xmm15[3],xmm12[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3,4,5],xmm11[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm10[1],xmm15[2],xmm10[3],xmm15[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm14, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7],ymm9[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm10, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm12, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <1,4,8,11,15,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm12, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,8,11,15,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm10, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm26
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0],ymm7[1],ymm11[2,3,4],ymm7[5],ymm11[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,10,3,14,7,10,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm21, %zmm7, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1,2],ymm5[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3],ymm3[4,5],ymm9[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm14, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm20, %zmm12, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0],ymm8[1],ymm4[2,3,4],ymm8[5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3],xmm9[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm10[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm4[1],ymm8[2,3,4],ymm4[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm4[2],ymm8[3,4],ymm4[5],ymm8[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm8[2],ymm4[3,4],ymm8[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm11 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm7, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm9, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm15, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm9, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm9, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm12 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm13 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm17 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm11 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm10, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm10, %zmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm4[1,2],ymm7[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0,1,2],ymm5[3,4,5,6,7],ymm4[8,9,10],ymm5[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm18 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm12 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm10 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm10 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm19
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm18 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm12 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm7 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm7 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm10, %zmm9 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm7 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm10, %zmm7 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm11, %zmm10 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm16, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm10 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm13, %zmm10 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm14, %zmm13 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm15, %zmm14 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm1 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm19, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 64(%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, (%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 64(%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 64(%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm15, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm16, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm0 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1864, %rsp # imm = 0x748
+; AVX512F-ONLY-FAST-NEXT:    addq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i16_stride7_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $1464, %rsp # imm = 0x5B8
+; AVX512DQ-SLOW-NEXT:    subq $1496, %rsp # imm = 0x5D8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 480(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
@@ -14274,7 +14271,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm5
@@ -14299,7 +14296,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[0,1,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm20
@@ -14327,7 +14324,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm10[0],xmm15[1],xmm10[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,0,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm6, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 528(%rdi), %xmm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm12
@@ -14363,7 +14360,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm25
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,0,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm4, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm3
@@ -14402,7 +14399,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm31
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm31
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -14435,7 +14432,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm4, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm1[1],ymm14[2,3,4],ymm1[5],ymm14[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
@@ -14473,7 +14470,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpsrlq $48, %xmm17, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm6, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm14
@@ -14490,7 +14487,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpsrlq $48, %xmm25, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
@@ -14508,7 +14505,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpsrld $16, %xmm20, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm14[2],ymm3[3,4],ymm14[5],ymm3[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
@@ -14526,7 +14523,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpsrld $16, %xmm24, %xmm9
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm15
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm5
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
@@ -14542,7 +14539,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm6, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm14[3],ymm3[4,5],ymm14[6],ymm3[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
@@ -14557,10 +14554,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 {%k1} # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 {%k1} # 16-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm14[2,3],ymm3[4,5],ymm14[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm25
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm16
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
@@ -14574,7 +14570,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm2
@@ -14607,7 +14603,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm21
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
@@ -14621,7 +14617,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 {%k1} # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 {%k1} # 16-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm3
@@ -14767,7 +14763,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # xmm7 = mem[0,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm17
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm1, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm6
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2],ymm6[3],ymm14[4,5],ymm6[6],ymm14[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm30
@@ -14795,7 +14791,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm17 # 64-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm17 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm7
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3,4,5],xmm7[6],xmm1[7]
@@ -14808,7 +14804,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # xmm7 = mem[0,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm16
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm1, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm15[3],ymm0[4,5],ymm15[6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm28
@@ -14850,17 +14846,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1],ymm1[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0,1,2],ymm14[3],ymm3[4,5],ymm14[6],ymm3[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm26
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm13
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0],xmm11[1],xmm13[2,3,4,5],xmm11[6],xmm13[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1],ymm12[2,3],ymm0[4,5],ymm12[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm22[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm24
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3],ymm13[4,5,6,7,8,9,10],ymm14[11],ymm13[12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm31
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2,3,4,5],xmm12[6],xmm14[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
@@ -14871,7 +14868,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm13, %ymm12, %ymm12
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm30
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm30 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm11, %zmm0, %zmm1 {%k1}
@@ -14891,8 +14888,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm20, %zmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm20, %zmm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
@@ -14904,7 +14901,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm10[0,1],ymm14[2,3],ymm10[4,5],ymm14[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm21[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm29
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3],ymm12[4,5,6,7,8,9,10],ymm13[11],ymm12[12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm7, %ymm12, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
@@ -14917,19 +14914,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm7, %ymm12, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm7, %zmm0, %zmm26 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm7, %zmm0, %zmm19 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm7 = ymm7[0,1,2],mem[3],ymm7[4,5],mem[6],ymm7[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm7[2,3,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4],ymm7[5,6,7,8,9,10,11],ymm11[12],ymm7[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm11
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm10
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm10[0],ymm2[1],ymm10[2,3,4],ymm2[5],ymm10[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm13
@@ -14938,7 +14935,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm22
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm7[0,1,2],ymm11[3,4,5,6,7],ymm7[8,9,10],ymm11[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
@@ -14969,15 +14966,15 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm12[4],ymm7[5,6,7,8,9,10,11],ymm12[12],ymm7[13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm22
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm10
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2],xmm14[3],xmm10[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
@@ -15019,12 +15016,12 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,13,12,13,12,13,12,13,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm8[0,1],ymm7[2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3],xmm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
@@ -15032,7 +15029,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4,5],ymm0[6],mem[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,3,1]
@@ -15059,33 +15056,32 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm20, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2,3,4],ymm6[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm20, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0],xmm0[1],xmm6[2],xmm0[3],xmm6[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[0,1],ymm6[2],mem[3,4,5],ymm6[6],mem[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd $187, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm6 = mem[0,1],ymm1[2],mem[3,4,5],ymm1[6],mem[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,3,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm7 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 64-byte Folded Reload
@@ -15115,27 +15111,26 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm12, 64(%rax)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm14, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $1464, %rsp # imm = 0x5B8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    addq $1496, %rsp # imm = 0x5D8
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1240, %rsp # imm = 0x4D8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
+; AVX512DQ-FAST-NEXT:    subq $1224, %rsp # imm = 0x4C8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,5,9,u,12,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm17, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm17, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm18
-; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa 480(%rdi), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm13
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm5[2],ymm13[3,4,5],ymm5[6],ymm13[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm3, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 480(%rdi), %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm15[2],ymm6[3,4,5],ymm15[6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm22
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -15149,159 +15144,162 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
 ; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm23
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm23[0,1,0,2]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm23[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm14
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm14[2],ymm12[3,4,5],ymm14[6],ymm12[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm13[2],ymm6[3,4,5],ymm13[6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm6, %ymm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
-; AVX512DQ-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm9
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm12
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm15
+; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm14[3],ymm12[4,5],ymm14[6],ymm12[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm7, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm10[2],ymm11[3,4,5],ymm10[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm5[2],ymm10[3,4,5],ymm5[6],ymm10[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm7
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm6[0,1,2,3,4,5,6],ymm7[7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm15[0],xmm9[1],xmm15[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0],xmm12[1],xmm0[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0,1,2],ymm0[3],ymm13[4,5],ymm0[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1,2],ymm15[3],ymm0[4,5],ymm15[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm27
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4],xmm2[5],xmm7[6],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm26
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm31[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[0,1,0,2]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm14[1],xmm7[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <2,6,9,u,13,u,u,u>
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,6,9,u,13,u,u,u>
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm13, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm3, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm28
+; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm5[3],ymm10[4,5],ymm5[6],ymm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm24
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3,4,5],xmm4[6],xmm7[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm29
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [21474836482,21474836482,21474836482,21474836482]
 ; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm10, %ymm11
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5,6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm15, %xmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm12
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm0[0],ymm13[1],ymm0[2,3],ymm13[4],ymm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm22
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm12
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm27, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm11, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm3, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm8, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1,2],ymm15[3],ymm9[4,5],ymm15[6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3,4,5],xmm3[6],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermd %ymm31, %ymm10, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm10, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm6
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm7
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm2[1],ymm5[2,3,4],ymm2[5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm6
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm14, %xmm30
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm27
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2],xmm3[3],xmm7[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm17, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm8, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm9[1],ymm1[2,3],ymm9[4],ymm1[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2,3,4,5],xmm8[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
@@ -15313,27 +15311,28 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm12[0,1,2,3,4,5,6],ymm14[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm24
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0],ymm13[1],ymm0[2,3,4],ymm13[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0],ymm13[1],ymm15[2,3,4],ymm13[5],ymm15[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0],xmm4[1],xmm14[2],xmm4[3],xmm14[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm11
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm11
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm11, %ymm7
 ; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0],ymm0[1],ymm15[2,3],ymm0[4],ymm15[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm5, %ymm0
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm31[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm26[0,1,1,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm27
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm31
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
@@ -15343,24 +15342,24 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm11
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm18, %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm8
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm22, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm19
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm1, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm1, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm16 {%k1} # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm16 {%k1} # 16-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0],ymm0[1],ymm15[2,3,4],ymm0[5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm0[1],ymm11[2,3,4],ymm0[5],ymm11[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
@@ -15368,10 +15367,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm8
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm22, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm9
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm20, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm29
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,3,3,3,0,3,7,7]
 ; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm4, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
@@ -15382,22 +15381,23 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2,3,4],ymm1[5,6,7],ymm5[8,9,10,11,12],ymm1[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm14, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm11, %xmm5
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm5[2,3],ymm10[4,5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm20
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3],xmm5[4],xmm1[5],xmm5[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,u,u,u,4,7,11,14>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm16, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm10
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3,4,5,6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm10[4,5,6,7]
@@ -15406,18 +15406,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm13, %zmm12
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm25 {%k1} # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm31, %ymm4, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm1 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm6, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm6, %zmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7],ymm3[8,9,10,11,12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm9, %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm8, %xmm18
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm30, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm22
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
 ; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
@@ -15427,24 +15427,23 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm10, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7],ymm0[8,9,10,11,12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm14[4],xmm11[4],xmm14[5],xmm11[5],xmm14[6],xmm11[6],xmm14[7],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm6, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm6, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1],ymm13[2,3],ymm11[4,5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0,1,2],xmm5[3],xmm15[4],xmm5[5],xmm15[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm17
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm14
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
@@ -15453,9 +15452,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm16
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm6, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3,4,5],ymm6[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3,4,5],ymm6[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0,1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm5
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1,2,3],xmm6[4],xmm14[5],xmm6[6],xmm14[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
@@ -15469,23 +15469,24 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm10, %zmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm15
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm15[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm15[0,1,2,3],xmm6[4],xmm15[5],xmm6[6],xmm15[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3,4,5,6],xmm4[7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm10, %zmm6
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm10, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm11[2],ymm13[3,4,5],ymm11[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm13[2],ymm5[3,4,5],ymm13[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm5, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
@@ -15495,49 +15496,50 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm20, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm5, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4,5],ymm0[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm6 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm6
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm15[4],xmm1[5],xmm15[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm20, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm5, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm31, %ymm12, %ymm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm27 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm12, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm10, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm10, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1],ymm1[2],ymm7[3,4,5],ymm1[6],ymm7[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm1[2],ymm5[3,4,5],ymm1[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm4
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2],ymm11[3],ymm13[4,5],ymm11[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2,3,4,5],xmm1[6],xmm8[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
@@ -15551,21 +15553,22 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm10, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm24 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm24 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm21 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm21 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm1[2],ymm10[3,4,5],ymm1[6],ymm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm1[2],ymm3[3,4,5],ymm1[6],ymm3[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm12, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm12, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm22
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm19
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm12[0],xmm8[1],xmm12[2,3,4,5],xmm8[6],xmm12[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -15573,31 +15576,33 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm27 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,3,7,10,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm8, %zmm14
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm8, %zmm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm14[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm30
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm2[3],ymm7[4,5],ymm2[6],ymm7[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm31
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm31
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5],xmm1[6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm11[0],ymm13[1],ymm11[2,3],ymm13[4],ymm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0],ymm15[1],ymm13[2,3],ymm15[4],ymm13[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm15, %ymm21
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
@@ -15611,31 +15616,32 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm14, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm30 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm14[2],ymm2[3,4],ymm14[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm28
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm8, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm29
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm3[3],ymm10[4,5],ymm3[6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0],ymm3[1],ymm10[2,3],ymm3[4],ymm10[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm30
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -15648,22 +15654,22 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm24
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm14, %xmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,4,7,11,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm19, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm19, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm20
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm15[1],xmm0[2,3,4,5],xmm15[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm11[0],ymm13[1],ymm11[2,3,4],ymm13[5],ymm11[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1],xmm12[2],xmm15[3],xmm12[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
@@ -15679,26 +15685,26 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm14[2,3],ymm2[4,5],ymm14[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm4[2,3],ymm2[4,5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm19, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm19, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm8
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0,1],ymm8[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm11[1],ymm7[2,3],ymm11[4],ymm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2,3,4,5],xmm8[6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm16, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm8
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm15
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
@@ -15719,16 +15725,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,4,8,11,15,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm6, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm6, %zmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm9[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2,3,4],ymm7[5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm10[2],ymm13[3,4],ymm10[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
@@ -15744,37 +15751,37 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm12[3],ymm2[4,5],ymm12[6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm21, %zmm6, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm6, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0],ymm10[1],ymm3[2,3,4],ymm10[5],ymm3[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0],ymm11[1],ymm3[2,3,4],ymm11[5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2],xmm4[3],xmm10[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1],ymm3[2],ymm15[3,4],ymm3[5],ymm15[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,1,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm3[1,2],ymm7[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm3[0,1,2],ymm4[3,4,5,6,7],ymm3[8,9,10],ymm4[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm31, %zmm25, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm2 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
@@ -15799,14 +15806,14 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%r8)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $1240, %rsp # imm = 0x4D8
+; AVX512DQ-FAST-NEXT:    addq $1224, %rsp # imm = 0x4C8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 533f7c07a1553..95800b30987ae 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -4453,7 +4453,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-LABEL: load_i8_stride6_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $136, %rsp
+; AVX512F-NEXT:    subq $88, %rsp
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
 ; AVX512F-NEXT:    vmovdqa64 224(%rdi), %ymm29
@@ -4478,7 +4478,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %xmm4, %xmm12, %xmm9
 ; AVX512F-NEXT:    vpor %xmm8, %xmm9, %xmm8
 ; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm8, %zmm1
 ; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm14
 ; AVX512F-NEXT:    vpternlogq $202, %ymm30, %ymm28, %ymm14
@@ -4500,7 +4500,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm10, %xmm11, %xmm11
 ; AVX512F-NEXT:    vpor %xmm4, %xmm11, %xmm2
-; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
 ; AVX512F-NEXT:    vpshufb %xmm4, %xmm13, %xmm11
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
@@ -4523,7 +4523,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
 ; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm0
 ; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm27, %ymm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
@@ -4553,7 +4553,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm10, %xmm10
 ; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm18
 ; AVX512F-NEXT:    vpor %xmm8, %xmm10, %xmm1
-; AVX512F-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
 ; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
@@ -4590,7 +4590,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512F-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm15
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm26
 ; AVX512F-NEXT:    vpternlogq $184, %zmm15, %zmm5, %zmm26
@@ -4673,7 +4673,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpternlogq $242, %ymm5, %ymm13, %ymm9
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm5
 ; AVX512F-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm9
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm29, %zmm9, %zmm9
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm29, %zmm9, %zmm9
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-NEXT:    vpternlogq $226, %zmm9, %zmm15, %zmm4
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
@@ -4690,15 +4690,15 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpternlogq $242, %ymm4, %ymm13, %ymm7
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm4
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm27, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm15, %zmm3
 ; AVX512F-NEXT:    vpternlogq $184, %zmm3, %zmm9, %zmm4
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm12
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm14
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT:    vpternlogq $184, %zmm12, %zmm0, %zmm2
@@ -4710,7 +4710,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
-; AVX512F-NEXT:    addq $136, %rsp
+; AVX512F-NEXT:    addq $88, %rsp
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4741,7 +4741,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm18, %xmm15, %xmm13
 ; AVX512BW-NEXT:    vpor %xmm11, %xmm13, %xmm11
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm5, %zmm11, %zmm11
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm5, %zmm11, %zmm11
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
 ; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm6, %ymm13
 ; AVX512BW-NEXT:    movw $-28124, %r10w # imm = 0x9224
@@ -4787,7 +4787,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
 ; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm15, %zmm8
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm8, %zmm15, %zmm8
 ; AVX512BW-NEXT:    vpshufb %xmm7, %xmm20, %xmm7
 ; AVX512BW-NEXT:    vpshufb %xmm9, %xmm21, %xmm9
 ; AVX512BW-NEXT:    vpor %xmm7, %xmm9, %xmm9
@@ -4830,7 +4830,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm25, %xmm24, %xmm27
 ; AVX512BW-NEXT:    vporq %xmm12, %xmm27, %xmm12
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm12
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm7, %zmm12, %zmm12
 ; AVX512BW-NEXT:    movl $2097151, %edi # imm = 0x1FFFFF
 ; AVX512BW-NEXT:    kmovq %rdi, %k6
 ; AVX512BW-NEXT:    vmovdqu8 %zmm18, %zmm12 {%k6}
@@ -4864,7 +4864,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm17, %xmm24, %xmm19
 ; AVX512BW-NEXT:    vporq %xmm16, %xmm19, %xmm16
 ; AVX512BW-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm13, %zmm16, %zmm13
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm13, %zmm16, %zmm13
 ; AVX512BW-NEXT:    vmovdqu8 %zmm8, %zmm13 {%k6}
 ; AVX512BW-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
 ; AVX512BW-NEXT:    vpshufb %xmm17, %xmm27, %xmm15
@@ -4897,7 +4897,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm19, %xmm1, %xmm20
 ; AVX512BW-NEXT:    vporq %xmm18, %xmm20, %xmm18
 ; AVX512BW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm14, %zmm18, %zmm14
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm14, %zmm18, %zmm14
 ; AVX512BW-NEXT:    movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
 ; AVX512BW-NEXT:    kmovq %rdi, %k1
 ; AVX512BW-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k1}
@@ -4931,7 +4931,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpor %xmm1, %xmm8, %xmm1
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
 ; AVX512BW-NEXT:    vpshufb %ymm7, %ymm4, %ymm0
 ; AVX512BW-NEXT:    vpshufb %xmm5, %xmm11, %xmm1

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index d3e8b9f2b21b0..6ad3c95d376fe 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -8946,26 +8946,26 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm3
@@ -8974,33 +8974,33 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm25
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm14, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm22, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm22, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 288(%rdi), %ymm19
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4,5],ymm5[6],ymm3[7,8,9],ymm5[10],ymm3[11,12,13],ymm5[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4,5],ymm5[6],ymm2[7,8,9],ymm5[10],ymm2[11,12,13],ymm5[14],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm1, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm21
@@ -9012,11 +9012,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm9, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm9, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm30, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm29, %ymm24, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
@@ -9045,7 +9045,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm13[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm10[1,8,15],zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm3
@@ -9101,7 +9101,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm13[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
@@ -9154,7 +9154,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm10[3,10],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm13[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm6, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm3
@@ -9390,7 +9390,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm10, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 288(%rdi), %ymm17
@@ -9451,7 +9451,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm4
@@ -9507,7 +9507,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm7[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm28, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm2
@@ -9560,7 +9560,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm8[3,10],zero,zero,zero,xmm8[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,xmm9[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm7, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm6, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm28, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm2
@@ -9754,398 +9754,401 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512DQ-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm26
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm11, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm7
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm7
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm10[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm10
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm10[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm10[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, %xmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[0,7,14],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm26, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm14, %zmm1, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm22, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 288(%rdi), %ymm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm1, %xmm15, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm13[0,1],ymm5[2],ymm13[3,4,5],ymm5[6],ymm13[7,8,9],ymm5[10],ymm13[11,12,13],ymm5[14],ymm13[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5],ymm4[6],ymm2[7,8,9],ymm4[10],ymm2[11,12,13],ymm4[14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm1, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm13
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm13[4,11],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm4[4,11],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm30, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm4[2],ymm5[3,4,5],ymm4[6],ymm5[7,8,9],ymm4[10],ymm5[11,12,13],ymm4[14],ymm5[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm1[2],ymm5[3,4,5],ymm1[6],ymm5[7,8,9],ymm1[10],ymm5[11,12,13],ymm1[14],ymm5[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm13[0],xmm6[0],xmm13[1],xmm6[1],xmm13[2],xmm6[2],xmm13[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm6[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[1,8,15],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm26, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm5
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm6[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm13[3],ymm6[4,5],ymm13[6],ymm6[7,8,9,10],ymm13[11],ymm6[12,13],ymm13[14],ymm6[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm5, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm5[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm13, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm6, %ymm24, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm12[1,8,15],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm22, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm2
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm5, %ymm2, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[1,8,15,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm13, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,xmm10[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm13, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm28, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm6
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u],zero,zero,xmm13[3,10],zero,zero,zero,xmm13[6,13,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5,6],ymm2[7,8],ymm13[9,10],ymm2[11],ymm13[12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm6, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm6[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm29, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0],ymm5[1,2],ymm4[3],ymm5[4,5,6],ymm4[7,8],ymm5[9,10],ymm4[11],ymm5[12,13,14],ymm4[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm30, %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[5,12]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm4, %ymm24, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm2, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[3,10],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,xmm10[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm13, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm6[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm13[0],ymm6[1,2,3],ymm13[4],ymm6[5,6],ymm13[7,8],ymm6[9,10,11],ymm13[12],ymm6[13,14],ymm13[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm5, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm5
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,u],zero,zero,xmm13[2,9],zero,zero,zero,xmm13[5,12]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm13, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm6, %ymm24, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm7[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm28, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7,8],ymm6[9],ymm5[10,11],ymm6[12],ymm5[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3],ymm5[4,5,6],ymm2[7,8],ymm5[9,10],ymm2[11],ymm5[12,13,14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1,2],ymm1[3],ymm2[4,5,6],ymm1[7,8],ymm2[9,10],ymm1[11],ymm2[12,13,14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm23, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[5,12]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm3, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm5
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[3,10],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm7[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm28, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3],ymm5[4],ymm4[5,6],ymm5[7,8],ymm4[9,10,11],ymm5[12],ymm4[13,14],ymm5[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[3,10],zero,zero,zero,xmm6[6,13]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vporq %xmm5, %xmm2, %xmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7,8],ymm6[9],ymm5[10,11,12],ymm6[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vporq %xmm5, %xmm2, %xmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm8, %xmm13, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm22, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm4, %ymm24, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7,8],ymm4[9],ymm2[10,11],ymm4[12],ymm2[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm27
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4],ymm4[5],ymm2[6,7,8],ymm4[9],ymm2[10,11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm30
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vporq %xmm2, %xmm0, %xmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm19, %ymm9, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm11, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm11, %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6],ymm0[7,8],ymm2[9,10,11],ymm0[12],ymm2[13,14],ymm0[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6],ymm1[7,8],ymm2[9,10,11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm29, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm3, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm11
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm20, %ymm8
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm11
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm23, %ymm5
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm11
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[5,12],zero,zero,xmm5[1,8,15],zero,zero,xmm5[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm11, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6,7,8],ymm0[9],ymm13[10,11],ymm0[12],ymm13[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm5, %ymm11
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm11, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm1[1],ymm8[2,3],ymm1[4],ymm8[5,6,7,8],ymm1[9],ymm8[10,11],ymm1[12],ymm8[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm13[6,13],zero,zero,xmm13[2,9],zero,zero,zero,xmm13[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0],ymm0[1],ymm9[2,3,4],ymm0[5],ymm9[6,7,8],ymm0[9],ymm9[10,11,12],ymm0[13],ymm9[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm3, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm12, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm8, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm6, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm17, %ymm1, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm9[0],ymm1[1],ymm9[2,3,4],ymm1[5],ymm9[6,7,8],ymm1[9],ymm9[10,11,12],ymm1[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm9[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm17, %ymm10, %ymm15
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[0,7,14]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm13, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm13, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm20, %ymm6
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,11],zero,zero,xmm1[0,7,14],zero,zero,xmm1[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm10[u,u,4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[1,8,15]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm7, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm10[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm9[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm12[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm18, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm18, %zmm2
 ; AVX512DQ-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm30, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm14[u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm5, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm15[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4],ymm5[5],ymm15[6,7,8,9],ymm5[10],ymm15[11,12],ymm5[13],ymm15[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm16, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,0,7,14],zero,zero,xmm4[3,10],zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7],ymm5[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, (%rdx)
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm14[u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm15[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0,1],ymm4[2],ymm15[3,4],ymm4[5],ymm15[6,7,8,9],ymm4[10],ymm15[11,12],ymm4[13],ymm15[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm29, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,0,7,14],zero,zero,xmm5[3,10],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, (%rdx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -10167,9 +10170,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm7
@@ -10191,19 +10194,19 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm23
-; AVX512DQ-FAST-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm3, %zmm20
+; AVX512DQ-FAST-NEXT:    vpor %xmm10, %xmm13, %xmm10
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm3, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm21, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 288(%rdi), %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm2[u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u]
@@ -10240,7 +10243,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4,5],ymm7[6],ymm3[7,8,9],ymm7[10],ymm3[11,12,13],ymm7[14],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
@@ -10260,10 +10263,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
@@ -10291,7 +10294,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -10313,11 +10316,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm9[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
@@ -10345,7 +10348,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1,2],ymm7[3],ymm3[4,5,6],ymm7[7,8],ymm3[9,10],ymm7[11],ymm3[12,13,14],ymm7[15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm1, %ymm5
@@ -10367,10 +10370,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,xmm9[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u]
@@ -10392,7 +10395,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm28
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
@@ -10418,7 +10421,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
@@ -10452,22 +10455,22 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[6,13]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %xmm7, %xmm8, %xmm7
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm12, %ymm11
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm12, %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm8
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm8
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm12
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm10
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm12
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[5,12],zero,zero,xmm5[1,8,15],zero,zero,xmm5[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm10, %xmm5
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm11, %xmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6],ymm0[7,8],ymm2[9,10,11],ymm0[12],ymm2[13,14],ymm0[15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm9, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm11
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm26, %ymm7
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm26, %ymm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm5, %ymm8
@@ -10476,11 +10479,11 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7,8],ymm0[9],ymm12[10,11,12],ymm0[13],ymm12[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm4, %ymm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
@@ -10515,7 +10518,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm7, %zmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
@@ -10524,8 +10527,8 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm4 {%k1}
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm30, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm11[u,u,u,u,2,9],zero,zero,zero,xmm11[5,12],zero,zero,xmm11[u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm10[u,u,u,u,2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[2,3,0,1]
@@ -10600,7 +10603,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k3}
@@ -10659,7 +10662,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm0 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -10686,7 +10689,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm18, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm18
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm18 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm0 {%k7}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -10714,7 +10717,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[3,10],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm19
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm19 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm21, %ymm20 {%k2}
@@ -10986,21 +10989,21 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm19
 ; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm5, %ymm5
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5,6],ymm5[7]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm4
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm7
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm5, %xmm12, %xmm5
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm4, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovq %rax, %k5
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm4, %zmm2 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
 ; AVX512BW-ONLY-FAST-NEXT:    movw $9288, %ax # imm = 0x2448
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k7
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm12 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm12 {%k7}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
@@ -11040,10 +11043,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm20
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,6],ymm15[7]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm6[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm15, %xmm20, %xmm15
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm14, %zmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm14 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u,u,u,u,u]
@@ -11064,9 +11067,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3,4,5,6],ymm0[7]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm6[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm15, %xmm19, %xmm15
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm0, %zmm15
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm14, %zmm15 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm0 {%k7}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
@@ -11091,9 +11094,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm18, %ymm14 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm6[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm14, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm18, %zmm14, %zmm18
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm22, %ymm21 {%k4}
@@ -11101,7 +11104,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k6}
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
@@ -11119,7 +11122,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm13 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[6,13,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
@@ -11135,7 +11138,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k4}
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k7}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
@@ -11156,14 +11159,14 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [16,25,18,3,28,21,6,23,24,17,10,27,20,13,30,31]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm0, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm14 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm14 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm0, %ymm14 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm0 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm0 {%k6}
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
@@ -11237,7 +11240,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm6[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k5}
@@ -11246,7 +11249,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm9, %xmm17, %xmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm20[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm6[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm20[0],xmm17[0],xmm20[1],xmm17[1],xmm20[2],xmm17[2],xmm20[3],xmm17[3]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm11, %zmm11
@@ -11256,34 +11259,34 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k5} = ymm19[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm12, %ymm10 {%k2}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,10,3,4,13,6,7,8,25,18,11,28,21,14,15]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm1, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm6
 ; AVX512BW-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm5, %ymm4 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
 ; AVX512BW-ONLY-FAST-NEXT:    movl $4186112, %edi # imm = 0x3FE000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 {%k1} = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm9 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm1, %zmm4, %zmm5
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm1
@@ -11336,8 +11339,8 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm11
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm11, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm12
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm12[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm13
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm13[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm5[7]
@@ -11346,31 +11349,31 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm24 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm10
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm9
 ; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm13, %xmm0, %xmm20
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm12, %xmm0, %xmm20
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm14
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
 ; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm0[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4,5],ymm13[6],ymm0[7,8,9],ymm13[10],ymm0[11,12,13],ymm13[14],ymm0[15]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4,5],ymm12[6],ymm0[7,8,9],ymm12[10],ymm0[11,12,13],ymm12[14],ymm0[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k7
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm20 {%k7}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm12
 ; AVX512DQBW-SLOW-NEXT:    movw $4644, %ax # imm = 0x1224
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm13, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm0 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero
@@ -11396,7 +11399,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
@@ -11404,7 +11407,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm0 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -11423,7 +11426,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm12, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm13, %xmm4
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm11[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm18[0],xmm4[0],xmm18[1],xmm4[1],xmm18[2],xmm4[2],xmm18[3],xmm4[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
@@ -11431,7 +11434,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm18, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm18
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm18 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm0 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
@@ -11448,7 +11451,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[5,12]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[5,12]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm11[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
@@ -11459,7 +11462,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[3,10],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm19
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm19 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm21, %ymm20 {%k2}
@@ -11477,7 +11480,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7,8,9,10],ymm4[11],ymm2[12,13],ymm4[14],ymm2[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm13, %ymm2 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
@@ -11496,7 +11499,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4,5,6],ymm4[7,8],ymm2[9,10],ymm4[11],ymm2[12,13,14],ymm4[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm13, %ymm2 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11]
@@ -11515,7 +11518,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6],ymm4[7,8],ymm2[9,10,11],ymm4[12],ymm2[13,14],ymm4[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm16, %ymm2 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm2 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
@@ -11548,14 +11551,14 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm16, %ymm0 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm16, %ymm0 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
@@ -11573,7 +11576,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm4, %xmm4
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[6,13]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[6,13]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm11[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
@@ -11584,7 +11587,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm2, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm11[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm12[0,7,14]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[0,7,14]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
@@ -11596,7 +11599,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm12[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm12[1,8,15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[1,8,15]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm8, %xmm11, %xmm8
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm7 {%k1}
@@ -11609,8 +11612,8 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 {%k5} = ymm0[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm4, %zmm4
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm8 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm25, %ymm3, %ymm4 {%k4}
@@ -11618,12 +11621,12 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6,7,8],ymm11[9],ymm4[10,11],ymm11[12],ymm4[13,14,15]
 ; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm11
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm22[5,12],zero,zero,xmm22[1,8,15],zero,zero,xmm22[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm11, %xmm12, %xmm11
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm22[5,12],zero,zero,xmm22[1,8,15],zero,zero,xmm22[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm11, %xmm13, %xmm11
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 {%k5} = ymm4[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm11 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm25, %ymm3 {%k6}
@@ -11651,14 +11654,14 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm13 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm12 {%k7}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm20, %zmm0, %zmm8 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm21, %zmm0, %zmm11 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[u,u,u,u,u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
@@ -11727,21 +11730,21 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm19
 ; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm5, %ymm5
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm4[0,1,2,3,4,5,6],ymm5[7]
-; AVX512DQBW-FAST-NEXT:    vmovdqa 240(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa 240(%rdi), %xmm5
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 224(%rdi), %xmm7
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm4, %zmm4
 ; AVX512DQBW-FAST-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512DQBW-FAST-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm2 {%k5}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
 ; AVX512DQBW-FAST-NEXT:    movw $9288, %ax # imm = 0x2448
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm12 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm12 {%k6}
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
@@ -11780,10 +11783,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm20
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,6],ymm15[7]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm15, %xmm20, %xmm15
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm14, %zmm14
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k5}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm14 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u,u,u,u,u]
@@ -11804,9 +11807,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
 ; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3,4,5,6],ymm0[7]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm15, %xmm19, %xmm15
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm15
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm0, %zmm15
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm14, %zmm15 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm0 {%k6}
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
@@ -11831,9 +11834,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm18, %ymm14 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm14, %zmm18
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm18, %zmm14, %zmm18
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm22, %ymm21 {%k3}
@@ -11841,7 +11844,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512DQBW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
@@ -11858,7 +11861,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm13 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[6,13,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
@@ -11874,7 +11877,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm0 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k6}
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
@@ -11895,14 +11898,14 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [16,25,18,3,28,21,6,23,24,17,10,27,20,13,30,31]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm14, %zmm16
 ; AVX512DQBW-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm14 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm14 {%k2}
 ; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm16, %ymm14 {%k7}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm16 {%k4}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm16 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm17
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,1,8,15],zero,zero,xmm16[4,11],zero,zero,xmm16[u,u,u]
@@ -11976,7 +11979,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm20 {%k5}
@@ -11985,7 +11988,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm9, %xmm17, %xmm9
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm19[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm0
@@ -11995,7 +11998,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm3, %xmm0
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
@@ -12004,9 +12007,9 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,10,3,4,13,6,7,8,25,18,11,28,21,14,15]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm1, %zmm1
 ; AVX512DQBW-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[u,u,u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm4 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512DQBW-FAST-NEXT:    movl $4186112, %edi # imm = 0x3FE000

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index 83083164d3c36..017c8b7ea44ab 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -324,40 +324,40 @@ define void @store_i16_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, (%rcx)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rcx)
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, 32(%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride3_vf8:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,0,0,u,1,1,u,2>
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,4,1,5,1,5,2,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm5, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,4,5],zero,zero,ymm3[2,3,6,7],zero,zero,ymm3[8,9,12,13],zero,zero,ymm3[18,19,22,23],zero,zero,ymm3[24,25,28,29],zero,zero,ymm3[26,27]
-; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm3
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, (%rcx)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rcx)
+; AVX512F-FAST-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, (%rdx), %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,0,0,u,1,1,u,2>
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm3, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,4,1,5,1,5,2,6]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm3, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5],zero,zero,ymm0[2,3,6,7],zero,zero,ymm0[8,9,12,13],zero,zero,ymm0[18,19,22,23],zero,zero,ymm0[24,25,28,29],zero,zero,ymm0[26,27]
+; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, 32(%rcx)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
 ; AVX512BW-LABEL: store_i16_stride3_vf8:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,8,16,1,9,17,2,10,18,3,11,19,4,12,20,5,13,21,6,14,22,7,15,23,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rcx)

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
index 6df7e2dd9c001..bc25bb39f9691 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -494,22 +494,22 @@ define void @store_i16_stride4_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa 16(%rdx), %xmm3
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm1
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX512F-NEXT:    vmovdqa 16(%rsi), %xmm4
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm5
 ; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm6
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm7, %zmm4, %zmm4
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,16,1,17,2,18,3,19,8,24,9,25,10,26,11,27]
 ; AVX512F-NEXT:    vpermt2d %zmm1, %zmm6, %zmm4
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm6, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, (%r8)
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, 64(%r8)

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index a852ec465f4d6..5ef699f087c32 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -342,10 +342,9 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,1,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, (%r9)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -369,10 +368,9 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,6,7,14,15,u,u,8,9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, (%r9)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm1
 ; AVX512F-FAST-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm1, (%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -386,7 +384,7 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,16,1,5,9,13,17,2,6,10,14,18,3,7,11,15,19,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
@@ -2769,9 +2767,9 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride5_vf32:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
@@ -2781,9 +2779,9 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm10
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm22
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
@@ -2793,114 +2791,112 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7,8],ymm6[9],ymm7[10],ymm6[11],ymm7[12,13],ymm6[14],ymm7[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm15
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %xmm23
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,2,1,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm16, %zmm6
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm13, %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
 ; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm17
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm17[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm15[0],ymm6[1],ymm15[2,3],ymm6[4],ymm15[5],ymm6[6],ymm15[7,8],ymm6[9],ymm15[10,11],ymm6[12],ymm15[13],ymm6[14],ymm15[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
-; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3],xmm8[4],xmm11[5],xmm8[6],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm8, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm15, %ymm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm21
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm4[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm11[1],ymm7[2],ymm11[3],ymm7[4,5],ymm11[6],ymm7[7,8],ymm11[9],ymm7[10],ymm11[11],ymm7[12,13],ymm11[14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm10, %xmm10
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm17[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5],ymm6[6],ymm7[7,8],ymm6[9],ymm7[10,11],ymm6[12],ymm7[13],ymm6[14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm11
+; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm14
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm14[1],xmm11[2,3],xmm14[4],xmm11[5],xmm14[6],xmm11[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm4[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm14[1],ymm8[2],ymm14[3],ymm8[4,5],ymm14[6],ymm8[7,8],ymm14[9],ymm8[10],ymm14[11],ymm8[12,13],ymm14[14],ymm8[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,2,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm9, %zmm18
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm18
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm22
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm23
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm8, %ymm19, %ymm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2],xmm8[3],xmm11[4,5],xmm8[6],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm6
+; AVX512F-SLOW-NEXT:    vpandnq %ymm10, %ymm19, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2],xmm0[3],xmm12[4,5],xmm0[6],xmm12[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm7
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5],xmm8[6],xmm6[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm8, %zmm6
-; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm9[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm13, %ymm11
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm11[0,1],ymm6[2],ymm11[3],ymm6[4],ymm11[5,6],ymm6[7],ymm11[8,9],ymm6[10],ymm11[11],ymm6[12],ymm11[13,14],ymm6[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm17[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm13[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7,8],ymm11[9],ymm12[10],ymm11[11],ymm12[12,13],ymm11[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm6, %zmm11
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm4[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5],ymm13[6],ymm12[7,8],ymm13[9],ymm12[10,11],ymm13[12],ymm12[13],ymm13[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm9[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm13, %ymm12
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0,1],ymm7[2],ymm12[3],ymm7[4],ymm12[5,6],ymm7[7],ymm12[8,9],ymm7[10],ymm12[11],ymm7[12],ymm12[13,14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0],ymm12[1],ymm13[2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7,8],ymm12[9],ymm13[10],ymm12[11],ymm13[12,13],ymm12[14],ymm13[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm12
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm4[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5],ymm14[6],ymm13[7,8],ymm14[9],ymm13[10,11],ymm14[12],ymm13[13],ymm14[14],ymm13[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0],ymm4[1],ymm13[2],ymm4[3,4],ymm13[5,6,7,8],ymm4[9],ymm13[10],ymm4[11,12],ymm13[13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3,4],ymm6[5,6,7,8],ymm4[9],ymm6[10],ymm4[11,12],ymm6[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm8, %zmm4
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm8
-; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm8
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm13, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm12, %zmm8, %zmm4
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm6
+; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm4
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm3[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm11[0],ymm4[1],ymm11[2,3],ymm4[4],ymm11[5],ymm4[6],ymm11[7,8],ymm4[9],ymm11[10,11],ymm4[12],ymm11[13],ymm4[14],ymm11[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm3[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5],ymm4[6],ymm8[7,8],ymm4[9],ymm8[10,11],ymm4[12],ymm8[13],ymm4[14],ymm8[15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vprolq $16, %ymm5, %ymm5
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
@@ -2908,24 +2904,24 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm4
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm1, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm9, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3],ymm5[4],ymm1[5],ymm5[6],ymm1[7,8],ymm5[9],ymm1[10,11],ymm5[12],ymm1[13],ymm5[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm1
+; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm3, %ymm4, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
@@ -5706,30 +5702,29 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride5_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $616, %rsp # imm = 0x268
+; AVX512F-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdx), %ymm26
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm26[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm3
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,2,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm13, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm7[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm7, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdi), %ymm21
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm21[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
@@ -5742,25 +5737,23 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm1[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm5, %ymm1, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm5, %ymm9, %ymm5
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm27
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdx), %ymm31
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm31[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3,4],ymm5[5,6,7,8],ymm4[9],ymm5[10],ymm4[11,12],ymm5[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm28
@@ -5777,69 +5770,69 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5],xmm5[6],xmm2[7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm27[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm14
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm14[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3,4],ymm5[5,6,7,8],ymm0[9],ymm5[10],ymm0[11,12],ymm5[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm6[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3,4],ymm4[5,6,7,8],ymm0[9],ymm4[10],ymm0[11,12],ymm4[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2,3],xmm5[4],xmm0[5],xmm5[6],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm4
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm6[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm21
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm21[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm16
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm16[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7,8],ymm8[9],ymm3[10],ymm8[11],ymm3[12,13],ymm8[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm3
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm2[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm12[0],ymm8[1],ymm12[2],ymm8[3],ymm12[4,5],ymm8[6],ymm12[7,8],ymm8[9],ymm12[10],ymm8[11],ymm12[12,13],ymm8[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2],xmm11[3],xmm5[4,5],xmm11[6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm31
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2],xmm11[3],xmm4[4,5],xmm11[6],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm5, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm4, %ymm8
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm0[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0],ymm8[1],ymm11[2,3],ymm8[4],ymm11[5],ymm8[6],ymm11[7,8],ymm8[9],ymm11[10,11],ymm8[12],ymm11[13],ymm8[14],ymm11[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm11
@@ -5853,69 +5846,69 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm29
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm8
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm8[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm10, %ymm11, %ymm10
+; AVX512F-SLOW-NEXT:    vpandn %ymm10, %ymm9, %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4,5],xmm8[6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm13, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0,1],ymm4[2],ymm8[3],ymm4[4],ymm8[5,6],ymm4[7],ymm8[8,9],ymm4[10],ymm8[11],ymm4[12],ymm8[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm23
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm13[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm7[1],ymm4[2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10],ymm7[11],ymm4[12,13],ymm7[14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm22
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm8, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4,5],xmm8[6],xmm5[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm24
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm21[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1],ymm5[2],ymm8[3],ymm5[4],ymm8[5,6],ymm5[7],ymm8[8,9],ymm5[10],ymm8[11],ymm5[12],ymm8[13,14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm7[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7,8],ymm7[9],ymm5[10],ymm7[11],ymm5[12,13],ymm7[14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm9, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5],ymm8[6],ymm7[7,8],ymm8[9],ymm7[10,11],ymm8[12],ymm7[13],ymm8[14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm9, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm26[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5],ymm8[6],ymm7[7,8],ymm8[9],ymm7[10,11],ymm8[12],ymm7[13],ymm8[14],ymm7[15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm9[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm15[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm13[1],ymm7[2],ymm13[3,4],ymm7[5,6,7,8],ymm13[9],ymm7[10],ymm13[11,12],ymm7[13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm26[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2],ymm8[3,4],ymm7[5,6,7,8],ymm8[9],ymm7[10],ymm8[11,12],ymm7[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm26
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm5, %ymm13
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm13[0,1],ymm7[2],ymm13[3],ymm7[4],ymm13[5,6],ymm7[7],ymm13[8,9],ymm7[10],ymm13[11],ymm7[12],ymm13[13,14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm4, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm7[2],ymm8[3],ymm7[4],ymm8[5,6],ymm7[7],ymm8[8,9],ymm7[10],ymm8[11],ymm7[12],ymm8[13,14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm5[0],ymm0[1],ymm5[2],ymm0[3],ymm5[4,5],ymm0[6],ymm5[7,8],ymm0[9],ymm5[10],ymm0[11],ymm5[12,13],ymm0[14],ymm5[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5],ymm5[6],ymm0[7,8],ymm5[9],ymm0[10,11],ymm5[12],ymm0[13],ymm5[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8],ymm0[9],ymm4[10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm3, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3],ymm4[4],ymm0[5],ymm4[6],ymm0[7,8],ymm4[9],ymm0[10,11],ymm4[12],ymm0[13],ymm4[14],ymm0[15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm6, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm21[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm16[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm21[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm14, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm6, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm27[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm14, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm27[1,1,1,2,5,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
@@ -5928,123 +5921,122 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm27
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm6
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm14
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm14
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm28[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5],ymm14[6],ymm12[7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13],ymm14[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm17[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm17[2,3,2,2]
 ; AVX512F-SLOW-NEXT:    vprolq $16, %ymm11, %ymm14
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm28[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1],ymm11[2],ymm14[3],ymm11[4],ymm14[5,6],ymm11[7],ymm14[8,9],ymm11[10],ymm14[11],ymm11[12],ymm14[13,14],ymm11[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm21
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm11, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm8[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm31[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7,8],ymm12[9],ymm14[10],ymm12[11],ymm14[12,13],ymm12[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm11, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm11, %ymm5
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm18[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm31[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5],ymm9[6],ymm5[7,8],ymm9[9],ymm5[10,11],ymm9[12],ymm5[13],ymm9[14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, (%rsp), %zmm11, %zmm17 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm8, %zmm18, %zmm17
-; AVX512F-SLOW-NEXT:    vpbroadcastq 88(%r8), %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm9, %zmm18, %zmm17
+; AVX512F-SLOW-NEXT:    vpbroadcastq 88(%r8), %ymm9
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 96(%r8), %ymm20
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm20, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm20, %zmm9
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm11, %zmm17
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm18, %zmm9
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm18, %zmm13
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm14
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm17
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm14, %zmm14
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm20, %zmm14
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm22[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm20, %zmm14
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm22[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm17 = mem[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm5
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 80(%r8), %ymm12
 ; AVX512F-SLOW-NEXT:    vpandn %ymm12, %ymm1, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm11
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm12, %ymm11
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm26[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm25[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm25[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm24[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm19[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm19[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm15[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm26[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm11, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm19 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm31, %zmm11, %zmm29
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm29 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm19, %zmm26
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm29, %zmm10
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm11 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm13, %zmm11
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm13 = mem[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm9, %zmm17, %zmm11
-; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm17, %zmm13
+; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm12, %zmm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm20, %zmm11
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm12, %zmm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm20, %zmm13
 ; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm19 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm17, %zmm19
-; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm17, %zmm19
+; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm19, %zmm12, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm25, %zmm12
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm17, %zmm12
-; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm12
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm25, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm12, %zmm17, %zmm13
+; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm12
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 120(%r8), %ymm15
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm11, %zmm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm12, %zmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm15, %zmm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm15, %zmm12
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm17, %zmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm5
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm15, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm15, %zmm4
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm18, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm18, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm18, %zmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm2, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm5, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 384(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 576(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm14, 128(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 320(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 448(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 320(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 448(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 512(%r9)
-; AVX512F-SLOW-NEXT:    addq $616, %rsp # imm = 0x268
+; AVX512F-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 9592967f730eb..e2ca0de7b33fe 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -405,9 +405,9 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, (%rax)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, 32(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -438,9 +438,9 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[4,5,12,13,u,u,u,u,u,u,u,u,6,7,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, (%rax)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm0, 32(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm1, (%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -457,7 +457,7 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,16,20,1,5,9,13,17,21,2,6,10,14,18,22,3,7,11,15,19,23,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 350e0c193e0e7..f87561e3aaa6a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -1030,21 +1030,21 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm6
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm2
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm5, %ymm8
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm1[0,2,0,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm2[0,2,0,2]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[0,1,8,9,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[18,19,26,27,u,u,u,u,u,u],zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm7[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,ymm10[u,u,u,u,u,u,6,7,14,15],zero,zero,zero,zero,ymm10[u,u,u,u,u,u,16,17,24,25],zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[4,5,u,u,u,u,u,u,u,u,u,u,u,u,6,7,22,23,u,u,u,u,u,u,u,u,u,u,u,u,24,25]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm1[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[4,5,u,u,u,u,u,u,u,u,u,u,u,u,6,7,22,23,u,u,u,u,u,u,u,u,u,u,u,u,24,25]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm2[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,2,2,4,5,6,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm11[1],ymm10[2,3,4,5],ymm11[6],ymm10[7,8],ymm11[9],ymm10[10,11,12,13],ymm11[14],ymm10[15]
@@ -1064,15 +1064,15 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
 ; AVX512F-SLOW-NEXT:    vpsrlq $48, %xmm4, %xmm4
 ; AVX512F-SLOW-NEXT:    vpunpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm4[1]
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
-; AVX512F-SLOW-NEXT:    vpsrld $16, %xmm6, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpsrld $16, %xmm6, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 12(%r10), %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6],xmm3[7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4],xmm2[5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,1,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6],xmm3[7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4],xmm1[5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[1,3,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u],zero,zero,zero,zero,ymm1[2,3,10,11,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[20,21,28,29,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm7[1,3,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,2,3,10,11],zero,zero,zero,zero,ymm2[u,u,u,u,u,u,20,21,28,29],zero,zero,zero,zero,ymm2[u,u,u,u]
@@ -1082,9 +1082,9 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogd $206, 8(%r10){1to8}, %ymm2, %ymm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, 64(%rax)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm0, 96(%rax)
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, 96(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, 64(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -1102,27 +1102,6 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm7
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm8
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm7[0,2,0,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[0,1,8,9,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[18,19,26,27,u,u,u,u,u,u],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm6[0,2,1,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,ymm10[u,u,u,u,u,u,6,7,14,15],zero,zero,zero,zero,ymm10[u,u,u,u,u,u,16,17,24,25],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm6[0,2,2,0]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,8,9],zero,zero,zero,zero,ymm10[u,u,u,u,u,u,2,3,18,19],zero,zero,zero,zero,ymm10[u,u,u,u,u,u,28,29,20,21]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <1,5,u,u,5,2,6,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm11, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,1,4,5,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[2,3,18,19,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[20,21,24,25]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512F-FAST-NEXT:    vporq %zmm9, %zmm10, %zmm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm8[0,2,0,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[u,u,u,u,u,u,u,u,0,1,8,9],zero,zero,ymm10[u,u,u,u,u,u,u,u,18,19,26,27],zero,zero,ymm10[u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,4,5,12,13],zero,zero,ymm10[u,u,u,u,u,u,u,u,22,23,30,31],zero,zero,ymm10[u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512F-FAST-NEXT:    vpbroadcastd (%r10), %ymm11
-; AVX512F-FAST-NEXT:    vpbroadcastd 4(%r10), %ymm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm3, %xmm3
 ; AVX512F-FAST-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm3[1]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
@@ -1143,10 +1122,31 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vpternlogd $206, 8(%r10){1to8}, %ymm2, %ymm3
 ; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, 64(%rax)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 96(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm7[0,2,0,2]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm2[0,1,8,9,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[18,19,26,27,u,u,u,u,u,u],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm6[0,2,1,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm3[u,u,u,u,u,u,6,7,14,15],zero,zero,zero,zero,ymm3[u,u,u,u,u,u,16,17,24,25],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm6[0,2,2,0]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,8,9],zero,zero,zero,zero,ymm3[u,u,u,u,u,u,2,3,18,19],zero,zero,zero,zero,ymm3[u,u,u,u,u,u,28,29,20,21]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <1,5,u,u,5,2,6,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,4,5,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[2,3,18,19,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[20,21,24,25]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vporq %zmm2, %zmm3, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm8[0,2,0,2]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,0,1,8,9],zero,zero,ymm3[u,u,u,u,u,u,u,u,18,19,26,27],zero,zero,ymm3[u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,4,5,12,13],zero,zero,ymm3[u,u,u,u,u,u,u,u,22,23,30,31],zero,zero,ymm3[u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-FAST-NEXT:    vpbroadcastd (%r10), %ymm4
+; AVX512F-FAST-NEXT:    vpbroadcastd 4(%r10), %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa %xmm0, 96(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm1, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -1157,12 +1157,11 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512BW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512BW-NEXT:    vmovdqa (%r10), %xmm3
 ; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vinserti32x4 $2, (%r10), %zmm1, %zmm1
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,8,16,24,32,40,48,1,9,17,25,33,41,49,2,10,18,26,34,42,50,3,11,19,27,35,43,51,4,12,20,28]
 ; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <36,44,52,5,13,21,29,37,45,53,6,14,22,30,38,46,54,7,15,23,31,39,47,55,u,u,u,u,u,u,u,u>

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
index 463b3bf6b94be..5d575ba3cec5e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
@@ -1642,56 +1642,56 @@ define void @store_i16_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa (%rcx), %ymm10
 ; AVX512F-NEXT:    vmovdqa (%r8), %ymm15
 ; AVX512F-NEXT:    vmovdqa (%r9), %ymm3
-; AVX512F-NEXT:    vmovdqa (%r10), %ymm5
+; AVX512F-NEXT:    vmovdqa (%r10), %ymm6
 ; AVX512F-NEXT:    vmovdqa (%rax), %ymm0
 ; AVX512F-NEXT:    vmovdqa (%rax), %xmm2
 ; AVX512F-NEXT:    vmovdqa (%r10), %xmm4
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
 ; AVX512F-NEXT:    vmovdqa64 %xmm4, %xmm21
 ; AVX512F-NEXT:    vmovdqa64 %xmm2, %xmm22
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm16
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm16
 ; AVX512F-NEXT:    vmovdqa (%r9), %xmm2
 ; AVX512F-NEXT:    vmovdqa (%r8), %xmm8
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
 ; AVX512F-NEXT:    vmovdqa64 %xmm2, %xmm23
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm20
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm20
 ; AVX512F-NEXT:    vmovdqa (%rcx), %xmm11
 ; AVX512F-NEXT:    vmovdqa (%rdx), %xmm12
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm17
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm17
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm13
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm14
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11]
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
+; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[8],ymm0[8],ymm6[9],ymm0[9],ymm6[10],ymm0[10],ymm6[11],ymm0[11]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm18
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[8],ymm3[8],ymm15[9],ymm3[9],ymm15[10],ymm3[10],ymm15[11],ymm3[11]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm4
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm19
-; AVX512F-NEXT:    vmovdqa (%rsi), %ymm6
+; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm9[0],ymm10[0],ymm9[1],ymm10[1],ymm9[2],ymm10[2],ymm9[3],ymm10[3],ymm9[8],ymm10[8],ymm9[9],ymm10[9],ymm9[10],ymm10[10],ymm9[11],ymm10[11]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm19
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm5
 ; AVX512F-NEXT:    vmovdqa %ymm7, %ymm1
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm6[0],ymm7[1],ymm6[1],ymm7[2],ymm6[2],ymm7[3],ymm6[3],ymm7[8],ymm6[8],ymm7[9],ymm6[9],ymm7[10],ymm6[10],ymm7[11],ymm6[11]
+; AVX512F-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm7, %zmm7
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15]
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm6[4],ymm0[4],ymm6[5],ymm0[5],ymm6[6],ymm0[6],ymm6[7],ymm0[7],ymm6[12],ymm0[12],ymm6[13],ymm0[13],ymm6[14],ymm0[14],ymm6[15],ymm0[15]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm15[4],ymm3[4],ymm15[5],ymm3[5],ymm15[6],ymm3[6],ymm15[7],ymm3[7],ymm15[12],ymm3[12],ymm15[13],ymm3[13],ymm15[14],ymm3[14],ymm15[15],ymm3[15]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm3, %zmm3
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm5
-; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm1[4],ymm6[4],ymm1[5],ymm6[5],ymm1[6],ymm6[6],ymm1[7],ymm6[7],ymm1[12],ymm6[12],ymm1[13],ymm6[13],ymm1[14],ymm6[14],ymm1[15],ymm6[15]
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm9[4],ymm10[4],ymm9[5],ymm10[5],ymm9[6],ymm10[6],ymm9[7],ymm10[7],ymm9[12],ymm10[12],ymm9[13],ymm10[13],ymm9[14],ymm10[14],ymm9[15],ymm10[15]
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm6
+; AVX512F-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4],ymm5[4],ymm1[5],ymm5[5],ymm1[6],ymm5[6],ymm1[7],ymm5[7],ymm1[12],ymm5[12],ymm1[13],ymm5[13],ymm1[14],ymm5[14],ymm1[15],ymm5[15]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm5
 ; AVX512F-NEXT:    vmovdqa64 %xmm21, %xmm1
 ; AVX512F-NEXT:    vmovdqa64 %xmm22, %xmm9
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm9, %zmm9
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm9
 ; AVX512F-NEXT:    vmovdqa64 %xmm23, %xmm1
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm8, %zmm8
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm8
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm10, %zmm10
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm10, %zmm10, %zmm10
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm11, %zmm11
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,u,0,16,u,u,1,17,10,10,10,26,u,u,11,27>
 ; AVX512F-NEXT:    vpermt2d %zmm16, %zmm12, %zmm20
@@ -1706,13 +1706,13 @@ define void @store_i16_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpermt2d %zmm19, %zmm14, %zmm7
 ; AVX512F-NEXT:    vmovdqa64 %zmm4, %zmm7 {%k1}
 ; AVX512F-NEXT:    vpermt2d %zmm0, %zmm1, %zmm3
-; AVX512F-NEXT:    vpermt2d %zmm5, %zmm14, %zmm6
-; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm6 {%k1}
+; AVX512F-NEXT:    vpermt2d %zmm6, %zmm14, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %zmm3, %zmm5 {%k1}
 ; AVX512F-NEXT:    vpermt2d %zmm9, %zmm12, %zmm8
 ; AVX512F-NEXT:    vpermt2d %zmm10, %zmm13, %zmm11
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, %zmm11 {%k1}
 ; AVX512F-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512F-NEXT:    vmovdqa64 %zmm6, 192(%rax)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, 192(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, 128(%rax)
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, 64(%rax)
 ; AVX512F-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
index f0d7f876da3e3..b98c60eab6d7c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
@@ -169,9 +169,8 @@ define void @store_i32_stride3_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-LABEL: store_i32_stride3_vf4:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm0
-; AVX512-NEXT:    vmovaps (%rdx), %xmm1
 ; AVX512-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vinsertf32x4 $2, (%rdx), %zmm0, %zmm0
 ; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,4,8,1,5,9,2,6,10,3,7,11,u,u,u,u>
 ; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextractf32x4 $2, %zmm0, 32(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index 1275cb2e682e9..440c7b719c828 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -95,7 +95,7 @@ define void @store_i32_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,2,4,6,8,1,3,5,7,9,u,u,u,u,u,u>
 ; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm1

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index be6a95630ae47..fdae93ac90e5b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -151,7 +151,7 @@ define void @store_i32_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
 ; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vinsertf32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,2,4,6,8,10,1,3,5,7,9,11,u,u,u,u>
 ; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextractf32x4 $2, %zmm0, 32(%rax)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index dc57a11304d0a..a45c1922e8f4d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -577,12 +577,11 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512-NEXT:    vmovdqa (%r10), %xmm3
 ; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
 ; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-NEXT:    vinserti32x4 $2, (%r10), %zmm1, %zmm1
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,4,8,12,16,20,24,1,5,9,13,17,21,25,2,6]
 ; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <10,14,18,22,26,3,7,11,15,19,23,27,u,u,u,u>

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
index cfb3eead834c8..79ae6890a672c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll
@@ -62,9 +62,8 @@ define void @store_i64_stride3_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-LABEL: store_i64_stride3_vf2:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vmovaps (%rdi), %xmm0
-; AVX512-NEXT:    vmovaps (%rdx), %xmm1
 ; AVX512-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vinsertf32x4 $2, (%rdx), %zmm0, %zmm0
 ; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,2,4,1,3,5,u,u>
 ; AVX512-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextractf32x4 $2, %zmm0, 32(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
index c6a0e42022b98..eadc265e94189 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-7.ll
@@ -107,12 +107,11 @@ define void @store_i64_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512-NEXT:    vmovdqa (%r10), %xmm3
 ; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
 ; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm1
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-NEXT:    vinserti32x4 $2, (%r10), %zmm1, %zmm1
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,2,4,6,8,10,12,1]
 ; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <3,5,7,9,11,13,u,u>

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 95825285815c0..bf57ee77a355a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -438,10 +438,9 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-SLOW-NEXT:    vmovd %eax, %xmm1
 ; AVX512F-SLOW-NEXT:    vpbroadcastw %xmm1, %xmm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, (%r9)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vmovq %xmm1, 32(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -470,10 +469,9 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-FAST-NEXT:    vmovd %eax, %xmm1
 ; AVX512F-FAST-NEXT:    vpbroadcastw %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, (%r9)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm0
+; AVX512F-FAST-NEXT:    vmovq %xmm1, 32(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm0, (%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -505,10 +503,9 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-SLOW-NEXT:    movw $132, %ax
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm2, (%r9)
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm1, (%r9)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
 ;
@@ -540,10 +537,9 @@ define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-FAST-NEXT:    movw $132, %ax
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %xmm1, %xmm0 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm2, (%r9)
-; AVX512BW-FAST-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovq %xmm0, 32(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, (%r9)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <8 x i8>, ptr %in.vecptr0, align 64
@@ -3903,131 +3899,135 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-LABEL: store_i8_stride5_vf64:
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm3, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm5, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm4, %ymm1
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm17
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm14
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm14, %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm7, %ymm8
-; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm8, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm7, %ymm10
+; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm10, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm10, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-SLOW-NEXT:    vporq %xmm1, %xmm12, %xmm20
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512F-SLOW-NEXT:    vporq %xmm12, %xmm13, %xmm20
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm22
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm23
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[27],zero,zero,ymm7[26],zero,ymm7[28],zero,ymm7[30],zero,zero,ymm7[29],zero,ymm7[31],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm18
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm22
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm23
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[27],zero,zero,ymm7[26],zero,ymm7[28],zero,ymm7[30],zero,zero,ymm7[29],zero,ymm7[31],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm15, %zmm18
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm4, %ymm15
-; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm15, %ymm21
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm6
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vporq %ymm15, %ymm1, %ymm21
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm15, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm11
-; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm11, %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm14
-; AVX512F-SLOW-NEXT:    vporq %xmm0, %xmm14, %xmm25
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm9
+; AVX512F-SLOW-NEXT:    vporq %ymm1, %ymm9, %ymm24
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm13
+; AVX512F-SLOW-NEXT:    vporq %xmm1, %xmm13, %xmm25
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm5
-; AVX512F-SLOW-NEXT:    vporq %xmm14, %xmm5, %xmm28
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm5
+; AVX512F-SLOW-NEXT:    vporq %xmm13, %xmm5, %xmm28
 ; AVX512F-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm29
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm26 = xmm29[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[0,1,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
 ; AVX512F-SLOW-NEXT:    vpandnq %ymm26, %ymm27, %ymm26
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm26, %zmm26
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm13
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm27 = ymm13[0,2,1,1,4,6,5,5]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm26, %zmm26
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm27 = ymm4[0,2,1,1,4,6,5,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,3,2]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
 ; AVX512F-SLOW-NEXT:    vpandnq %ymm27, %ymm30, %ymm27
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm13, %ymm14
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm14, %zmm27
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm4, %ymm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm13, %zmm27
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u,9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm12, %ymm14
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm12, %ymm13
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm7, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm4, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[27],zero,zero,ymm4[26],zero,ymm4[28],zero,ymm4[30],zero,zero,ymm4[29],zero,ymm4[31],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm6, %ymm12
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm6[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm7[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm14[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm7[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[21],zero,zero,ymm9[20],zero,ymm9[22],zero,ymm9[24],zero,zero,ymm9[23],zero,ymm9[25],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm9[26],zero,ymm9[28],zero,zero,ymm9[27],zero,ymm9[29],zero,ymm9[31],zero,zero,ymm9[30],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm11
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm11
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm11[0],xmm1[0],xmm11[1],xmm1[1],xmm11[2],xmm1[2],xmm11[3],xmm1[3],xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm5[0,2,1,1,4,6,5,5]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[21],zero,zero,ymm11[20],zero,ymm11[22],zero,ymm11[24],zero,zero,ymm11[23],zero,ymm11[25],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[26],zero,ymm11[28],zero,zero,ymm11[27],zero,ymm11[29],zero,ymm11[31],zero,zero,ymm11[30],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm9
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm9
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3],xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm10, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm5[0,2,1,1,4,6,5,5]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm1, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm25, %zmm0, %zmm25
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm28, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
@@ -4035,37 +4035,37 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm3
-; AVX512F-SLOW-NEXT:    vpor %ymm14, %ymm12, %ymm1
+; AVX512F-SLOW-NEXT:    vpor %ymm13, %ymm12, %ymm1
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm21, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm12, %ymm10, %ymm6
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm24, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm12, %ymm14, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm24, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm7
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm10 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm12, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vpandq %ymm12, %ymm30, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm31, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm12, %ymm2, %ymm6
+; AVX512F-SLOW-NEXT:    vpandq %ymm12, %ymm31, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm30, %zmm1
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm18[2,2,3,3,6,6,7,7]
 ; AVX512F-SLOW-NEXT:    vporq %zmm2, %zmm1, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm10, %zmm2, %zmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm9, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm2, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm2, %zmm8
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm26
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm11[2,3,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm9[2,3,3,2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm27
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm13[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm27
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm29[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm3
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm25[0,0,1,1,4,4,5,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,0,1,1,4,4,5,5]
@@ -4106,11 +4106,11 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm6, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm14, %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm10
 ; AVX512F-FAST-NEXT:    vpor %ymm8, %ymm10, %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm8
@@ -4131,12 +4131,12 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm20
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm1
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm1, %ymm21
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm13
+; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm13, %ymm21
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm9
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm5
 ; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm5, %ymm24
@@ -4148,75 +4148,75 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm15
 ; AVX512F-FAST-NEXT:    vporq %xmm5, %xmm15, %xmm25
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm15
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm12
-; AVX512F-FAST-NEXT:    vporq %xmm15, %xmm12, %xmm29
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm11
+; AVX512F-FAST-NEXT:    vporq %xmm5, %xmm11, %xmm29
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [1,1,2,2,2,2,2,2]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm15
-; AVX512F-FAST-NEXT:    vpermd %ymm15, %ymm26, %ymm27
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm11
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm26, %ymm27
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
 ; AVX512F-FAST-NEXT:    vpandnq %ymm27, %ymm28, %ymm27
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm27, %zmm31
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm12
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm27, %zmm31
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [4,6,5,5,5,5,4,6]
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm27, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm5
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm27, %ymm28
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
 ; AVX512F-FAST-NEXT:    vpandnq %ymm28, %ymm30, %ymm28
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm2, %zmm28
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u,9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm11
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm12
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[19],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm15
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm13, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[27],zero,zero,ymm13[26],zero,ymm13[28],zero,ymm13[30],zero,zero,ymm13[29],zero,ymm13[31],zero,zero
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm11[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[27],zero,zero,ymm4[26],zero,ymm4[28],zero,ymm4[30],zero,zero,ymm4[29],zero,ymm4[31],zero,zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm12[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[21],zero,zero,ymm9[20],zero,ymm9[22],zero,ymm9[24],zero,zero,ymm9[23],zero,ymm9[25],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm13[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[21],zero,zero,ymm9[20],zero,ymm9[22],zero,ymm9[24],zero,zero,ymm9[23],zero,ymm9[25],zero,zero
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm9[26],zero,ymm9[28],zero,zero,ymm9[27],zero,ymm9[29],zero,ymm9[31],zero,zero,ymm9[30],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm14[2,2,3,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm7
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3],xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm14[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm1, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm15[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm25, %zmm1, %zmm18
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,6,6,6,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm15, %ymm14, %ymm25
-; AVX512F-FAST-NEXT:    vpermd %ymm15, %ymm27, %ymm27
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [6,6,6,6,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm15, %ymm25
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm27, %ymm27
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [0,0,0,0,0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm15, %ymm29, %ymm15
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm14, %ymm14
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm26, %ymm26
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm29, %ymm12
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm29, %ymm11
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm26, %ymm26
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm29, %ymm5
 ; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm17 = mem[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm16 # 32-byte Folded Reload
@@ -4227,33 +4227,33 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm6, %ymm2
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm21, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm10, %ymm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm24, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm10, %ymm12
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm24, %zmm10
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm19, %zmm10
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm7, %ymm13
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm12
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm14, %ymm4
 ; AVX512F-FAST-NEXT:    vpandq %ymm6, %ymm30, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm20[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vporq %zmm5, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm13, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm7, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm20[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vporq %zmm6, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm12, %zmm6, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm9
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm5, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm6, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm31
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm27, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm28
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm15, %zmm2
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm18[0,0,1,1,4,4,5,5]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm3[0,0,1,1,4,4,5,5]
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm12, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm5, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
@@ -4293,8 +4293,8 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm5 {%k7}
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm13
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm16
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm16[0],xmm13[0],xmm16[1],xmm13[1],xmm16[2],xmm13[2],xmm16[3],xmm13[3],xmm16[4],xmm13[4],xmm16[5],xmm13[5],xmm16[6],xmm13[6],xmm16[7],xmm13[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm17[0],xmm13[0],xmm17[1],xmm13[1],xmm17[2],xmm13[2],xmm17[3],xmm13[3],xmm17[4],xmm13[4],xmm17[5],xmm13[5],xmm17[6],xmm13[6],xmm17[7],xmm13[7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm9
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
@@ -4311,7 +4311,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm9 {%k1}
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %ymm4
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r8), %ymm17
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r8), %ymm16
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm4[2,2,3,3,6,6,7,7]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
 ; AVX512BW-SLOW-NEXT:    movl $-2078209982, %eax # imm = 0x84210842
@@ -4327,41 +4327,41 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vporq %ymm18, %ymm19, %ymm18
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm21, %xmm11, %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm20 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm20, %xmm11, %xmm11
 ; AVX512BW-SLOW-NEXT:    vpor %xmm12, %xmm11, %xmm11
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm11, %zmm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm20 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm20, %xmm16, %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm21, %xmm17, %xmm11
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm13, %xmm12
 ; AVX512BW-SLOW-NEXT:    vpor %xmm11, %xmm12, %xmm11
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm11[0,0,1,1]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm25
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <3,3,3,u,4,4,4,4>
-; AVX512BW-SLOW-NEXT:    vpermd %ymm25, %ymm11, %ymm16
+; AVX512BW-SLOW-NEXT:    vpermd %ymm25, %ymm11, %ymm17
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm26
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,0,u,u,u,u,1,u,u,u,u,2,u,u,u,u>
 ; AVX512BW-SLOW-NEXT:    movl $138547332, %eax # imm = 0x8421084
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm12, %ymm26, %ymm16 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm13, %zmm13
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm12, %ymm26, %ymm17 {%k4}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm13, %zmm13
 ; AVX512BW-SLOW-NEXT:    movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k5
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm13, %zmm18 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm18, %ymm16
+; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm18, %ymm17
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <3,3,3,3,u,4,4,4>
-; AVX512BW-SLOW-NEXT:    vpermd %ymm17, %ymm13, %ymm27
+; AVX512BW-SLOW-NEXT:    vpermd %ymm16, %ymm13, %ymm27
 ; AVX512BW-SLOW-NEXT:    movl $1108378657, %eax # imm = 0x42108421
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm27, %ymm16 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm27, %ymm17 {%k3}
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,2]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,1,1]
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm18 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm18, %zmm14
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm16, %ymm26, %ymm18
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm18, %zmm14
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128>
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm17, %ymm26, %ymm18
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128]
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm18, %ymm25, %ymm28
@@ -4388,45 +4388,45 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm27, %zmm7
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm26, %zmm7 {%k6}
 ; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm23
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm24 = ymm17[2,2,3,3,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm24 = ymm16[2,2,3,3,6,6,7,7]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,3,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm24, %ymm23 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm17 = ymm17[0,2,1,1,4,6,5,5]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,3,3,2]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm16 = ymm16[0,2,1,1,4,6,5,5]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,3,3,2]
 ; AVX512BW-SLOW-NEXT:    movl $277094664, %eax # imm = 0x10842108
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm7 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm17
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdx), %xmm16
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm7, %zmm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rcx), %xmm23
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm23, %xmm19
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm21, %xmm17, %xmm21
-; AVX512BW-SLOW-NEXT:    vporq %xmm19, %xmm21, %xmm19
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm23[0],xmm17[0],xmm23[1],xmm17[1],xmm23[2],xmm17[2],xmm23[3],xmm17[3],xmm23[4],xmm17[4],xmm23[5],xmm17[5],xmm23[6],xmm17[6],xmm23[7],xmm17[7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm17, %xmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rsi), %xmm17
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm6, %zmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm20, %xmm16, %xmm20
+; AVX512BW-SLOW-NEXT:    vporq %xmm19, %xmm20, %xmm19
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm23[0],xmm16[0],xmm23[1],xmm16[1],xmm23[2],xmm16[2],xmm23[3],xmm16[3],xmm23[4],xmm16[4],xmm23[5],xmm16[5],xmm23[6],xmm16[6],xmm23[7],xmm16[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm16, %xmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rsi), %xmm16
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm6, %zmm6
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %xmm19
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm20, %xmm19, %xmm20
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm17, %xmm21
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm21, %xmm19, %xmm20
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm16, %xmm21
 ; AVX512BW-SLOW-NEXT:    vporq %xmm20, %xmm21, %xmm20
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3],xmm19[4],xmm17[4],xmm19[5],xmm17[5],xmm19[6],xmm17[6],xmm19[7],xmm17[7]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm19[0],xmm16[0],xmm19[1],xmm16[1],xmm19[2],xmm16[2],xmm19[3],xmm16[3],xmm19[4],xmm16[4],xmm19[5],xmm16[5],xmm19[6],xmm16[6],xmm19[7],xmm16[7]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm17, %xmm8
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm8, %zmm8
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm16, %xmm8
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm20, %zmm8, %zmm8
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,1,1,4,4,5,5]
 ; AVX512BW-SLOW-NEXT:    movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k2
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm6, %zmm8 {%k2}
 ; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm8, %ymm6
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm19 = xmm17[1,1,2,2]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %xmm16
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm19 = xmm16[1,1,2,2]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm19[0,1,1,1]
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm19, %ymm6 {%k4}
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm17 = xmm17[0,0,1,1]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,0,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm16 = xmm16[0,0,1,1]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,0,0,1]
 ; AVX512BW-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 4-byte Reload
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm8 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm8 {%k2}
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm6
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm15, %ymm1, %ymm8
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
@@ -4439,7 +4439,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermd %ymm3, %ymm11, %ymm1
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm1 {%k4}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm16, %ymm2, %ymm2
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm17, %ymm2, %ymm2
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm18, %ymm3, %ymm3
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
@@ -4472,20 +4472,20 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm19
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm19, %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm9, %xmm19, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm8
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm21
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm21, %xmm4
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm11, %xmm21, %xmm4
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm10
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm18
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm13, %xmm18, %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm11
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm12
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm20
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm20, %xmm3
@@ -4578,19 +4578,19 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermd %ymm4, %ymm25, %ymm22
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm22, %ymm18 {%k5}
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm18, %zmm18
-; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm10
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm12
-; AVX512BW-FAST-NEXT:    vpor %xmm10, %xmm12, %xmm10
+; AVX512BW-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm9
+; AVX512BW-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm11
+; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm11, %xmm9
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm7, %xmm7
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm7, %zmm7
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,0,1,1,4,4,5,5]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm8
-; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm10
-; AVX512BW-FAST-NEXT:    vpor %xmm8, %xmm10, %xmm8
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm8
+; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm9
+; AVX512BW-FAST-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm20, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm9, %zmm8
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k5

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 57fa4c0f8a86b..26408c2ed15ab 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -373,23 +373,21 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
 ; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT:    vextracti32x4 $2, %zmm2, %xmm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15]
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
-; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14],ymm3[15]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
-; AVX512F-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
-; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm1
+; AVX512F-NEXT:    vmovdqa %xmm0, 32(%rax)
 ; AVX512F-NEXT:    vmovdqa %ymm1, (%rax)
-; AVX512F-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -406,25 +404,23 @@ define void @store_i8_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
-; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm2, %xmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15]
-; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6],xmm2[7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,u,u,u,u,19,27,u,u,u,u,20,28,u,u,u,u]
 ; AVX512BW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,0,8,u,u,u,u,1,9,u,u,u,u,2,10,u,u,19,27,u,u,u,u,20,28,u,u,u,u,21,29]
-; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13,14],ymm3[15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7],ymm4[8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13,14],ymm3[15]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[2,10,1,9,0,8,3,11,u,u,u,u,4,12,u,u]
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
 ; AVX512BW-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512BW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-NEXT:    vmovdqu16 %ymm1, %ymm2 {%k1}
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT:    vmovdqa %ymm2, (%rax)
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
+; AVX512BW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,u,6,14],zero,zero,xmm0[u,u,7,15],zero,zero,xmm0[u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,13,u,u],zero,zero,xmm1[6,14,u,u],zero,zero,xmm1[7,15,u,u]
+; AVX512BW-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,5,13,u,u,u,u,6,14,u,u,u,u,7,15]
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm1
+; AVX512BW-NEXT:    vmovdqa %xmm0, 32(%rax)
+; AVX512BW-NEXT:    vmovdqa %ymm1, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <8 x i8>, ptr %in.vecptr0, align 64
@@ -1804,27 +1800,27 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vprold $16, %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[4],ymm13[4],ymm11[5],ymm13[5],ymm11[6],ymm13[6],ymm11[7],ymm13[7],ymm11[16],ymm13[16],ymm11[17],ymm13[17],ymm11[18],ymm13[18],ymm11[19],ymm13[19],ymm11[20],ymm13[20],ymm11[21],ymm13[21],ymm11[22],ymm13[22],ymm11[23],ymm13[23]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm14
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[4],ymm13[4],ymm11[5],ymm13[5],ymm11[6],ymm13[6],ymm11[7],ymm13[7],ymm11[16],ymm13[16],ymm11[17],ymm13[17],ymm11[18],ymm13[18],ymm11[19],ymm13[19],ymm11[20],ymm13[20],ymm11[21],ymm13[21],ymm11[22],ymm13[22],ymm11[23],ymm13[23]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm14
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm14
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm6
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[16],ymm9[16],ymm8[17],ymm9[17],ymm8[18],ymm9[18],ymm8[19],ymm9[19],ymm8[20],ymm9[20],ymm8[21],ymm9[21],ymm8[22],ymm9[22],ymm8[23],ymm9[23]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm5, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm12, %ymm15
 ; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm14
@@ -1859,31 +1855,31 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm9
 ; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm2
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm9, %zmm2
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,0,0,1,4,4,4,5]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
 ; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4043,7 +4039,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm13
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm0
@@ -4056,11 +4052,12 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, %xmm2
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm12, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm27
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm1
@@ -4071,17 +4068,17 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm3, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm3, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm28
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm14, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm13, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm4, %ymm1
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm4[8],ymm14[8],ymm4[9],ymm14[9],ymm4[10],ymm14[10],ymm4[11],ymm14[11],ymm4[12],ymm14[12],ymm4[13],ymm14[13],ymm4[14],ymm14[14],ymm4[15],ymm14[15],ymm4[24],ymm14[24],ymm4[25],ymm14[25],ymm4[26],ymm14[26],ymm4[27],ymm14[27],ymm4[28],ymm14[28],ymm4[29],ymm14[29],ymm4[30],ymm14[30],ymm4[31],ymm14[31]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm4[8],ymm13[8],ymm4[9],ymm13[9],ymm4[10],ymm13[10],ymm4[11],ymm13[11],ymm4[12],ymm13[12],ymm4[13],ymm13[13],ymm4[14],ymm13[14],ymm4[15],ymm13[15],ymm4[24],ymm13[24],ymm4[25],ymm13[25],ymm4[26],ymm13[26],ymm4[27],ymm13[27],ymm4[28],ymm13[28],ymm4[29],ymm13[29],ymm4[30],ymm13[30],ymm4[31],ymm13[31]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm29
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
@@ -4101,12 +4098,12 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm15
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[1],ymm0[1],ymm11[2],ymm0[2],ymm11[3],ymm0[3],ymm11[4],ymm0[4],ymm11[5],ymm0[5],ymm11[6],ymm0[6],ymm11[7],ymm0[7],ymm11[16],ymm0[16],ymm11[17],ymm0[17],ymm11[18],ymm0[18],ymm11[19],ymm0[19],ymm11[20],ymm0[20],ymm11[21],ymm0[21],ymm11[22],ymm0[22],ymm11[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11],ymm15[12],ymm1[12],ymm15[13],ymm1[13],ymm15[14],ymm1[14],ymm15[15],ymm1[15],ymm15[24],ymm1[24],ymm15[25],ymm1[25],ymm15[26],ymm1[26],ymm15[27],ymm1[27],ymm15[28],ymm1[28],ymm15[29],ymm1[29],ymm15[30],ymm1[30],ymm15[31],ymm1[31]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm1[8],ymm15[8],ymm1[9],ymm15[9],ymm1[10],ymm15[10],ymm1[11],ymm15[11],ymm1[12],ymm15[12],ymm1[13],ymm15[13],ymm1[14],ymm15[14],ymm1[15],ymm15[15],ymm1[24],ymm15[24],ymm1[25],ymm15[25],ymm1[26],ymm15[26],ymm1[27],ymm15[27],ymm1[28],ymm15[28],ymm1[29],ymm15[29],ymm1[30],ymm15[30],ymm1[31],ymm15[31]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm11, %ymm11
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
@@ -4126,7 +4123,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[4],ymm1[4],ymm15[5],ymm1[5],ymm15[6],ymm1[6],ymm15[7],ymm1[7],ymm15[16],ymm1[16],ymm15[17],ymm1[17],ymm15[18],ymm1[18],ymm15[19],ymm1[19],ymm15[20],ymm1[20],ymm15[21],ymm1[21],ymm15[22],ymm1[22],ymm15[23],ymm1[23]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm15[0],ymm1[1],ymm15[1],ymm1[2],ymm15[2],ymm1[3],ymm15[3],ymm1[4],ymm15[4],ymm1[5],ymm15[5],ymm1[6],ymm15[6],ymm1[7],ymm15[7],ymm1[16],ymm15[16],ymm1[17],ymm15[17],ymm1[18],ymm15[18],ymm1[19],ymm15[19],ymm1[20],ymm15[20],ymm1[21],ymm15[21],ymm1[22],ymm15[22],ymm1[23],ymm15[23]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
@@ -4382,13 +4379,13 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm25
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm23
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm23
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
@@ -4396,11 +4393,11 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm0[8],xmm12[9],xmm0[9],xmm12[10],xmm0[10],xmm12[11],xmm0[11],xmm12[12],xmm0[12],xmm12[13],xmm0[13],xmm12[14],xmm0[14],xmm12[15],xmm0[15]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm12, %zmm21
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm12, %zmm21
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm12
 ; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm0
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm22
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm22
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[4],ymm4[4],ymm5[5],ymm4[5],ymm5[6],ymm4[6],ymm5[7],ymm4[7],ymm5[16],ymm4[16],ymm5[17],ymm4[17],ymm5[18],ymm4[18],ymm5[19],ymm4[19],ymm5[20],ymm4[20],ymm5[21],ymm4[21],ymm5[22],ymm4[22],ymm5[23],ymm4[23]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm14

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 0dc809bd246c3..bc5b41354502a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -1498,78 +1498,77 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %xmm9
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm7, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm4, %ymm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,5],zero,ymm2[u,u,u,u,u,6],zero,ymm2[u,u,u,u,u],zero,ymm2[23,u,u,u,u,u],zero,ymm2[24,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm2[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u],zero,ymm11[5,u,u,u,u,u],zero,ymm11[6,u,u,u,u,u,23],zero,ymm11[u,u,u,u,u,24],zero,ymm11[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $50, %ymm10, %ymm12, %ymm11
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm2[0,2,0,2]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,5],zero,ymm1[u,u,u,u,u,6],zero,ymm1[u,u,u,u,u],zero,ymm1[23,u,u,u,u,u],zero,ymm1[24,u,u,u,u,u],zero
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u],zero,ymm13[5,u,u,u,u,u],zero,ymm13[6,u,u,u,u,u,23],zero,ymm13[u,u,u,u,u,24],zero,ymm13[u,u,u,u,u,25]
-; AVX512F-SLOW-NEXT:    vpternlogq $200, %ymm11, %ymm12, %ymm13
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm1[0,2,0,2]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,ymm11[u,u,u,1,9],zero,zero,ymm11[u,u,u,2,10],zero,zero,ymm11[u,u,u,19,27],zero,zero,ymm11[u,u,u,20,28],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm11, %zmm11
-; AVX512F-SLOW-NEXT:    vporq %zmm10, %zmm11, %zmm10
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,0]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm11, %ymm12, %ymm11
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm9[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
-; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm0[4],zero,ymm0[u,u,u,u,u,5],zero,ymm0[u,u,u,u,u,6],zero,ymm0[u,u,u,u,u],zero,ymm0[23,u,u,u,u,u],zero,ymm0[24,u,u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm0[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,ymm14[4,u,u,u,u,u],zero,ymm14[5,u,u,u,u,u],zero,ymm14[6,u,u,u,u,u,23],zero,ymm14[u,u,u,u,u,24],zero,ymm14[u,u]
-; AVX512F-SLOW-NEXT:    vpternlogq $200, %ymm13, %ymm12, %ymm14
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[0,2,0,2]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,0,8],zero,ymm12[u,u,u,u,1,9],zero,ymm12[u,u,u,u,18,26],zero,ymm12[u,u,u,u,19,27],zero,ymm12[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
-; AVX512F-SLOW-NEXT:    vporq %zmm11, %zmm12, %zmm11
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,xmm7[12,13,u,u,u],zero,zero,xmm7[14,15,u,u,u]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,12,13],zero,zero,xmm3[u,u,u,14,15],zero,zero,xmm3[u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,xmm9[13,u,u,u,u],zero,zero,xmm9[14,u,u,u,u],zero,zero,xmm9[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[10],zero,xmm4[u,u,u,u,13,12],zero,xmm4[u,u,u,u,15,14],zero
-; AVX512F-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,1,1,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[1],zero,zero,ymm1[u,u,u,10,2],zero,zero,ymm1[u,u,u,11,3],zero,zero,ymm1[u,u,u,20,28],zero,zero,ymm1[u,u,u,21,29],zero,zero,ymm1[u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,3,3,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,ymm2[1,9,u,u,u],zero,zero,ymm2[2,10,u,u,u],zero,zero,ymm2[3,19,u,u,u],zero,zero,ymm2[28,20,u,u,u],zero,zero,ymm2[29,21,u]
-; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,4,5,5,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,1,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,1,9],zero,ymm0[u,u,u,u,2,10],zero,ymm0[u,u,u,u,19,27],zero,ymm0[u,u,u,u,20,28],zero,ymm0[u,u,u,u,21]
-; AVX512F-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm1, 96(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %xmm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[12,13,u,u,u],zero,zero,xmm6[14,15,u,u,u]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,12,13],zero,zero,xmm4[u,u,u,14,15],zero,zero,xmm4[u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[10],zero,xmm5[u,u,u,u,13,12],zero,xmm5[u,u,u,u,15,14],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,xmm1[13,u,u,u,u],zero,zero,xmm1[14,u,u,u,u],zero,zero,xmm1[15]
+; AVX512F-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[3,1,1,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[1],zero,zero,ymm4[u,u,u,10,2],zero,zero,ymm4[u,u,u,11,3],zero,zero,ymm4[u,u,u,20,28],zero,zero,ymm4[u,u,u,21,29],zero,zero,ymm4[u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[1,3,3,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,ymm6[1,9,u,u,u],zero,zero,ymm6[2,10,u,u,u],zero,zero,ymm6[3,19,u,u,u],zero,zero,ymm6[28,20,u,u,u],zero,zero,ymm6[29,21,u]
+; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm6, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[1,3,1,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
+; AVX512F-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm7, %zmm4
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm3[u,u,u,u,u,5],zero,ymm3[u,u,u,u,u,6],zero,ymm3[u,u,u,u,u],zero,ymm3[23,u,u,u,u,u],zero,ymm3[24,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $50, %ymm6, %ymm8, %ymm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,0,2]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,ymm3[18,26,u,u,u],zero,zero,ymm3[19,27,u,u,u],zero,zero,ymm3[20,28]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm2[u,u,u,5],zero,ymm2[u,u,u,u,u,6],zero,ymm2[u,u,u,u,u],zero,ymm2[23,u,u,u,u,u],zero,ymm2[24,u,u,u,u,u],zero
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u,u,25]
+; AVX512F-SLOW-NEXT:    vpternlogq $200, %ymm6, %ymm8, %ymm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,0,2]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10],zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vporq %zmm3, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,0]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
+; AVX512F-SLOW-NEXT:    vpandn %ymm3, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[4],zero,ymm0[u,u,u,u,u,5],zero,ymm0[u,u,u,u,u,6],zero,ymm0[u,u,u,u,u],zero,ymm0[23,u,u,u,u,u],zero,ymm0[24,u,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,ymm7[4,u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u]
+; AVX512F-SLOW-NEXT:    vpternlogq $200, %ymm3, %ymm6, %ymm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,0,2]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,8],zero,ymm0[u,u,u,u,1,9],zero,ymm0[u,u,u,u,18,26],zero,ymm0[u,u,u,u,19,27],zero,ymm0[u,u,u,u]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, 96(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, 64(%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -1577,71 +1576,70 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm6
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm7
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm8
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm8[0,2,0,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [1,5,2,6,1,5,2,6]
-; AVX512F-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm11, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u],zero,zero,ymm12[1,5,u,u,u],zero,zero,ymm12[2,6,u,u,u],zero,zero,ymm12[19,23,u,u,u],zero,zero,ymm12[24,28,u,u,u],zero
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm7[0,2,0,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[0,8],zero,zero,ymm12[u,u,u,1,9],zero,zero,ymm12[u,u,u,2,10],zero,zero,ymm12[u,u,u,19,27],zero,zero,ymm12[u,u,u,20,28],zero,zero
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm11, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,1,5],zero,zero,ymm13[u,u,u,2,6],zero,zero,ymm13[u,u,u,19,23],zero,zero,ymm13[u,u,u,24,28],zero,zero,ymm13[u,u,u,25]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512F-FAST-NEXT:    vporq %zmm10, %zmm12, %zmm10
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm6[1,1,0,0,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,1,u,1,u,0,0,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm13, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm6[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm9[0,2,0,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,0,8],zero,ymm13[u,u,u,u,1,9],zero,ymm13[u,u,u,u,18,26],zero,ymm13[u,u,u,u,19,27],zero,ymm13[u,u,u,u]
-; AVX512F-FAST-NEXT:    vpermd %ymm9, %ymm11, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,4],zero,ymm11[u,u,u,u,1,5],zero,ymm11[u,u,u,u,2,6],zero,ymm11[u,u,u,u,19,23],zero,ymm11[u,u,u,u,24,28],zero,ymm11[u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm0
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm6
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm7
+; AVX512F-FAST-NEXT:    vinserti128 $1, (%r9), %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[12,13,u,u,u],zero,zero,xmm4[14,15,u,u,u]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13],zero,zero,xmm0[u,u,u,14,15],zero,zero,xmm0[u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,xmm6[13,u,u,u,u],zero,zero,xmm6[14,u,u,u,u],zero,zero,xmm6[15]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm7[3,1,1,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,ymm0[u,u,u,10,2],zero,zero,ymm0[u,u,u,11,3],zero,zero,ymm0[u,u,u,20,28],zero,zero,ymm0[u,u,u,21,29],zero,zero,ymm0[u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm8[1,3,3,1]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,ymm2[1,9,u,u,u],zero,zero,ymm2[2,10,u,u,u],zero,zero,ymm2[3,19,u,u,u],zero,zero,ymm2[28,20,u,u,u],zero,zero,ymm2[29,21,u]
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm6[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [2,2,3,3,2,2,3,3]
-; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[1,3,1,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,1,9],zero,ymm3[u,u,u,u,2,10],zero,ymm3[u,u,u,u,19,27],zero,ymm3[u,u,u,u,20,28],zero,ymm3[u,u,u,u,21]
-; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, 64(%rax)
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 96(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,12,13],zero,zero,xmm2[u,u,u,14,15],zero,zero,xmm2[u,u,u]
+; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10],zero,xmm3[u,u,u,u,13,12],zero,xmm3[u,u,u,u,15,14],zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
+; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm7[3,1,1,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[1],zero,zero,ymm2[u,u,u,10,2],zero,zero,ymm2[u,u,u,11,3],zero,zero,ymm2[u,u,u,20,28],zero,zero,ymm2[u,u,u,21,29],zero,zero,ymm2[u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm6[1,3,3,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,ymm4[1,9,u,u,u],zero,zero,ymm4[2,10,u,u,u],zero,zero,ymm4[3,19,u,u,u],zero,zero,ymm4[28,20,u,u,u],zero,zero,ymm4[29,21,u]
+; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [2,2,3,3,2,2,3,3]
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm5, %ymm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[1,3,1,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,1,9],zero,ymm5[u,u,u,u,2,10],zero,ymm5[u,u,u,u,19,27],zero,ymm5[u,u,u,u,20,28],zero,ymm5[u,u,u,u,21]
+; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm6[0,2,0,2]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[0,8,u,u,u],zero,zero,ymm4[1,9,u,u,u],zero,zero,ymm4[18,26,u,u,u],zero,zero,ymm4[19,27,u,u,u],zero,zero,ymm4[20,28]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [1,5,2,6,1,5,2,6]
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm5, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u],zero,zero,ymm6[1,5,u,u,u],zero,zero,ymm6[2,6,u,u,u],zero,zero,ymm6[19,23,u,u,u],zero,zero,ymm6[24,28,u,u,u],zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm7[0,2,0,2]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,u,u,1,9],zero,zero,ymm6[u,u,u,2,10],zero,zero,ymm6[u,u,u,19,27],zero,zero,ymm6[u,u,u,20,28],zero,zero
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm5, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,5],zero,zero,ymm7[u,u,u,2,6],zero,zero,ymm7[u,u,u,19,23],zero,zero,ymm7[u,u,u,24,28],zero,zero,ymm7[u,u,u,25]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vporq %zmm4, %zmm6, %zmm4
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[1,1,0,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,1,u,1,u,0,0,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm7, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,0,2]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8],zero,ymm6[u,u,u,u,1,9],zero,ymm6[u,u,u,u,18,26],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u]
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,4],zero,ymm1[u,u,u,u,1,5],zero,ymm1[u,u,u,u,2,6],zero,ymm1[u,u,u,u,19,23],zero,ymm1[u,u,u,u,24,28],zero,ymm1[u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm3, 96(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -1650,89 +1648,90 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r10), %xmm2
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm0
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm4, %ymm1
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm8, %ymm5
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm1[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,ymm11[5],zero,zero,zero,zero,zero,zero,ymm11[6],zero,zero,zero,zero,zero,ymm11[23],zero,zero,zero,zero,zero,zero,ymm11[24],zero,zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %ymm10, %ymm11, %ymm10
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,ymm12[5],zero,zero,zero,zero,zero,zero,ymm12[6],zero,zero,zero,zero,zero,ymm12[23],zero,zero,zero,zero,zero,zero,ymm12[24],zero,zero,zero,zero,zero,zero,ymm12[25]
-; AVX512BW-SLOW-NEXT:    vpor %ymm12, %ymm11, %ymm11
-; AVX512BW-SLOW-NEXT:    movl $202911840, %ecx # imm = 0xC183060
-; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm10, %ymm11 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm5[4],zero,zero,zero,zero,zero,zero,ymm5[5],zero,zero,zero,zero,zero,zero,ymm5[6],zero,zero,zero,zero,zero,zero,zero,ymm5[23],zero,zero,zero,zero,zero,zero,ymm5[24],zero,zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,ymm12[4],zero,zero,zero,zero,zero,zero,ymm12[5],zero,zero,zero,zero,zero,zero,ymm12[6],zero,zero,zero,zero,zero,ymm12[23],zero,zero,zero,zero,zero,zero,ymm12[24],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %ymm10, %ymm12, %ymm10
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm2, %ymm12, %ymm12
-; AVX512BW-SLOW-NEXT:    movl $1082196484, %ecx # imm = 0x40810204
-; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm12, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    movl $1893843847, %ecx # imm = 0x70E1C387
-; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm10, %ymm11 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm2, %ymm10, %ymm10
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm5[0,2,0,2]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    movl $135274560, %ecx # imm = 0x8102040
-; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm10, %ymm12 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,2,0,2]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,8],zero,zero,zero,zero,zero,ymm10[1,9],zero,zero,zero,zero,zero,ymm10[2,10],zero,zero,zero,zero,zero,ymm10[19,27],zero,zero,zero,zero,zero,ymm10[20,28],zero,zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm1[0,2,0,2]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,ymm13[0,8],zero,zero,zero,zero,zero,ymm13[1,9],zero,zero,zero,zero,zero,ymm13[18,26],zero,zero,zero,zero,zero,ymm13[19,27],zero,zero,zero,zero,zero,ymm13[20,28]
-; AVX512BW-SLOW-NEXT:    vpor %ymm10, %ymm13, %ymm10
-; AVX512BW-SLOW-NEXT:    movl $236730480, %ecx # imm = 0xE1C3870
-; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm12, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
-; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,6,7,7,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm3, %ymm0
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, (%r9), %ymm2, %ymm2
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, (%r10), %zmm2, %zmm2
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm4[10,u,u,u,u,u,13,12,u,u,u,u,u,15,14,u]
+; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm4
+; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm4[0,1,2,3,6,7,7,7]
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,3,2]
 ; AVX512BW-SLOW-NEXT:    movw $-32510, %cx # imm = 0x8102
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %xmm9, %xmm8 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[12,13],zero,zero,zero,zero,zero,xmm3[14,15],zero,zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,zero,xmm4[12,13],zero,zero,zero,zero,zero,xmm4[14,15],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,xmm5[12,13],zero,zero,zero,zero,zero,xmm5[14,15],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
 ; AVX512BW-SLOW-NEXT:    movw $-7741, %cx # imm = 0xE1C3
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %xmm8, %xmm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-SLOW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm2, %ymm4, %ymm2
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm5[1,3,1,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-SLOW-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm5, %ymm5
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm2[1,3,1,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
 ; AVX512BW-SLOW-NEXT:    movl $67637280, %ecx # imm = 0x4081020
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm4 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,3,3,1]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,ymm1[1,9],zero,zero,zero,zero,zero,ymm1[2,10],zero,zero,zero,zero,zero,ymm1[3,19],zero,zero,zero,zero,zero,ymm1[28,20],zero,zero,zero,zero,zero,ymm1[29,21],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,1,1,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1],zero,zero,zero,zero,zero,ymm0[10,2],zero,zero,zero,zero,zero,ymm0[11,3],zero,zero,zero,zero,zero,ymm0[20,28],zero,zero,zero,zero,zero,ymm0[21,29],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm5, %ymm6 {%k1}
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[1,3,3,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,ymm5[1,9],zero,zero,zero,zero,zero,ymm5[2,10],zero,zero,zero,zero,zero,ymm5[3,19],zero,zero,zero,zero,zero,ymm5[28,20],zero,zero,zero,zero,zero,ymm5[29,21],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[3,1,1,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,zero,zero,zero,ymm7[10,2],zero,zero,zero,zero,zero,ymm7[11,3],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero,zero,zero,zero,ymm7[21,29],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %ymm5, %ymm7, %ymm5
 ; AVX512BW-SLOW-NEXT:    movl $-2029118408, %ecx # imm = 0x870E1C38
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm0, 64(%rax)
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $2, %zmm1, 96(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm5 {%k1}
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm5
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,ymm1[5],zero,zero,zero,zero,zero,zero,ymm1[6],zero,zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,ymm1[24],zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,ymm7[5],zero,zero,zero,zero,zero,zero,ymm7[6],zero,zero,zero,zero,zero,ymm7[23],zero,zero,zero,zero,zero,zero,ymm7[24],zero,zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %ymm6, %ymm7, %ymm6
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,ymm0[6],zero,zero,zero,zero,zero,zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,ymm0[24],zero,zero,zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero,zero,zero,zero,ymm8[25]
+; AVX512BW-SLOW-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-SLOW-NEXT:    movl $202911840, %ecx # imm = 0xC183060
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm2[4],zero,zero,zero,zero,zero,zero,ymm2[5],zero,zero,zero,zero,zero,zero,ymm2[6],zero,zero,zero,zero,zero,zero,zero,ymm2[23],zero,zero,zero,zero,zero,zero,ymm2[24],zero,zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm2[2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,ymm8[4],zero,zero,zero,zero,zero,zero,ymm8[5],zero,zero,zero,zero,zero,zero,ymm8[6],zero,zero,zero,zero,zero,ymm8[23],zero,zero,zero,zero,zero,zero,ymm8[24],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm8, %ymm8
+; AVX512BW-SLOW-NEXT:    movl $1082196484, %ecx # imm = 0x40810204
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm6 {%k1}
+; AVX512BW-SLOW-NEXT:    movl $1893843847, %ecx # imm = 0x70E1C387
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm6, %ymm4
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,0,2]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    movl $135274560, %ecx # imm = 0x8102040
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm2 {%k1}
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,0,2]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero,ymm0[20,28],zero,zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,0,2]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,ymm1[18,26],zero,zero,zero,zero,zero,ymm1[19,27],zero,zero,zero,zero,zero,ymm1[20,28]
+; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512BW-SLOW-NEXT:    movl $236730480, %ecx # imm = 0xE1C3870
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k1}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm3, 96(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm5, 64(%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
 ;
@@ -1740,81 +1739,82 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa (%r10), %xmm1
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm7
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm8
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-FAST-NEXT:    vpermw %ymm1, %ymm10, %ymm10
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [1,5,2,6,1,5,2,6]
-; AVX512BW-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermd %ymm9, %ymm11, %ymm12
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[0,4,u,u,u,u,u,1,5,u,u,u,u,u,2,6,u,u,u,u,u,19,23,u,u,u,u,u,24,28,u,u]
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm6
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm0
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm1
+; AVX512BW-FAST-NEXT:    vinserti128 $1, (%r9), %ymm6, %ymm6
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, (%r10), %zmm6, %zmm6
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[12,13],zero,zero,zero,zero,zero,xmm2[14,15],zero,zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,xmm3[12,13],zero,zero,zero,zero,zero,xmm3[14,15],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm3
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10],zero,zero,zero,zero,zero,xmm3[13,12],zero,zero,zero,zero,zero,xmm3[15,14],zero
+; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm4
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,xmm4[13],zero,zero,zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,zero,zero,xmm4[15]
+; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512BW-FAST-NEXT:    movw $-7741, %cx # imm = 0xE1C3
+; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu8 %xmm3, %xmm2 {%k1}
+; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm6[1,3,1,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
+; AVX512BW-FAST-NEXT:    movl $67637280, %ecx # imm = 0x4081020
+; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm3, %ymm5 {%k1}
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[1,3,3,1]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,ymm3[1,9],zero,zero,zero,zero,zero,ymm3[2,10],zero,zero,zero,zero,zero,ymm3[3,19],zero,zero,zero,zero,zero,ymm3[28,20],zero,zero,zero,zero,zero,ymm3[29,21],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[3,1,1,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,zero,zero,zero,ymm7[10,2],zero,zero,zero,zero,zero,ymm7[11,3],zero,zero,zero,zero,zero,ymm7[20,28],zero,zero,zero,zero,zero,ymm7[21,29],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpor %ymm3, %ymm7, %ymm3
+; AVX512BW-FAST-NEXT:    movl $-2029118408, %ecx # imm = 0x870E1C38
+; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm5, %ymm3 {%k1}
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm5, %ymm5
+; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [1,5,2,6,1,5,2,6]
+; AVX512BW-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vpermd %ymm6, %ymm7, %ymm8
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[0,4,u,u,u,u,u,1,5,u,u,u,u,u,2,6,u,u,u,u,u,19,23,u,u,u,u,u,24,28,u,u]
 ; AVX512BW-FAST-NEXT:    movl $1082196484, %ecx # imm = 0x40810204
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm10, %ymm12 {%k1}
-; AVX512BW-FAST-NEXT:    vpermd %ymm8, %ymm11, %ymm10
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,ymm10[1,5],zero,zero,zero,zero,zero,ymm10[2,6],zero,zero,zero,zero,zero,ymm10[19,23],zero,zero,zero,zero,zero,ymm10[24,28],zero,zero,zero,zero,zero,ymm10[25]
-; AVX512BW-FAST-NEXT:    vpermd %ymm7, %ymm11, %ymm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,ymm11[1,5],zero,zero,zero,zero,zero,ymm11[2,6],zero,zero,zero,zero,zero,ymm11[19,23],zero,zero,zero,zero,zero,ymm11[24,28],zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %ymm10, %ymm11, %ymm10
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm5, %ymm8 {%k1}
+; AVX512BW-FAST-NEXT:    vpermd %ymm0, %ymm7, %ymm5
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm5[1,5],zero,zero,zero,zero,zero,ymm5[2,6],zero,zero,zero,zero,zero,ymm5[19,23],zero,zero,zero,zero,zero,ymm5[24,28],zero,zero,zero,zero,zero,ymm5[25]
+; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm7, %ymm7
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,ymm7[1,5],zero,zero,zero,zero,zero,ymm7[2,6],zero,zero,zero,zero,zero,ymm7[19,23],zero,zero,zero,zero,zero,ymm7[24,28],zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpor %ymm5, %ymm7, %ymm5
 ; AVX512BW-FAST-NEXT:    movl $1893843847, %ecx # imm = 0x70E1C387
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm12, %ymm10 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
-; AVX512BW-FAST-NEXT:    vpermw %ymm1, %ymm11, %ymm11
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm9[0,2,0,2]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm8, %ymm5 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
+; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm7, %ymm4
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8,u,u,u,u,u,1,9,u,u,u,u,u,18,26,u,u,u,u,u,19,27,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    movl $135274560, %ecx # imm = 0x8102040
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm12 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm8[0,2,0,2]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,zero,zero,zero,ymm11[1,9],zero,zero,zero,zero,zero,ymm11[2,10],zero,zero,zero,zero,zero,ymm11[19,27],zero,zero,zero,zero,zero,ymm11[20,28],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm7[0,2,0,2]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,ymm13[0,8],zero,zero,zero,zero,zero,ymm13[1,9],zero,zero,zero,zero,zero,ymm13[18,26],zero,zero,zero,zero,zero,ymm13[19,27],zero,zero,zero,zero,zero,ymm13[20,28]
-; AVX512BW-FAST-NEXT:    vpor %ymm11, %ymm13, %ymm11
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm4, %ymm6 {%k1}
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,0,2]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,8],zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,zero,ymm0[2,10],zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero,ymm0[20,28],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,0,2]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,ymm1[18,26],zero,zero,zero,zero,zero,ymm1[19,27],zero,zero,zero,zero,zero,ymm1[20,28]
+; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-FAST-NEXT:    movl $236730480, %ecx # imm = 0xE1C3870
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm12, %ymm11 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermw %ymm1, %ymm11, %ymm11
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[1,3,1,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,1,9,u,u,u,u,u,2,10,u,u,u,u,u,19,27,u,u,u,u,u,20,28,u,u,u,u,u,21]
-; AVX512BW-FAST-NEXT:    movl $67637280, %ecx # imm = 0x4081020
-; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm9 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[1,3,3,1]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,ymm7[1,9],zero,zero,zero,zero,zero,ymm7[2,10],zero,zero,zero,zero,zero,ymm7[3,19],zero,zero,zero,zero,zero,ymm7[28,20],zero,zero,zero,zero,zero,ymm7[29,21],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,1,1,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,zero,zero,zero,ymm8[10,2],zero,zero,zero,zero,zero,ymm8[11,3],zero,zero,zero,zero,zero,ymm8[20,28],zero,zero,zero,zero,zero,ymm8[21,29],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FAST-NEXT:    movl $-2029118408, %ecx # imm = 0x870E1C38
-; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm9, %ymm7 {%k1}
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[12,13],zero,zero,zero,zero,zero,xmm4[14,15],zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm6[8],xmm3[9],xmm6[9],xmm3[10],xmm6[10],xmm3[11],xmm6[11],xmm3[12],xmm6[12],xmm3[13],xmm6[13],xmm3[14],xmm6[14],xmm3[15],xmm6[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,xmm3[12,13],zero,zero,zero,zero,zero,xmm3[14,15],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[10],zero,zero,zero,zero,zero,xmm0[13,12],zero,zero,zero,zero,zero,xmm0[15,14],zero
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm1[13],zero,zero,zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,zero,zero,xmm1[15]
-; AVX512BW-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    movw $-7741, %cx # imm = 0xE1C3
-; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %xmm0, %xmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm7, 64(%rax)
-; AVX512BW-FAST-NEXT:    vextracti32x4 $2, %zmm0, 96(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm6, %ymm0 {%k1}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa %xmm2, 96(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm3, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64
@@ -3353,7 +3353,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm10, %xmm7
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm10, %zmm7
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm13[u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6,u,u]
@@ -3362,7 +3362,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpor %xmm10, %xmm11, %xmm10
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm11, %zmm10
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm10
@@ -3372,7 +3372,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm12, %xmm7
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm7
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm12, %zmm7
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm16 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %xmm12
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
@@ -3505,7 +3505,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpor %xmm7, %xmm10, %xmm7
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm10, %zmm7
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm11
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u]
@@ -3514,7 +3514,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpor %xmm10, %xmm13, %xmm10
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm13, %zmm10
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm13, %zmm10
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm10
@@ -3531,7 +3531,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpor %xmm7, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm7, %zmm0
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm0[0,1,0,1,4,5,4,5]
 ; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
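
The repeated "vinserti64x4 $1, %ymm, ..." -> "vinserti32x4 $2, %xmm, ..." changes above show the new fold at work: the inserted value no longer travels through a 256-bit widening whose upper half is undef, so the insert is expressed directly at 128-bit granularity. A reduced LLVM IR sketch of the kind of pattern involved (a hypothetical hand-written function, not one of the modified tests — in the affected tests the pattern arises internally during shuffle lowering rather than from IR like this, and whether the combine fires depends on how the shuffles legalize):

define <8 x i64> @widen_then_insert(<8 x i64> %x, <2 x i64> %y) {
  ; Widen %y to 256 bits with a poison upper half; after legalization this
  ; is the (insert_subvector undef, Y, 0) node the combine looks through.
  %y.wide = shufflevector <2 x i64> %y, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
  ; Widen again to 512 bits so the final shuffle has matching types.
  %y.512 = shufflevector <4 x i64> %y.wide, <4 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
  ; Insert the widened 256-bit value (only its low 128 bits are defined)
  ; into the upper half of %x: insert_subvector X, (widened Y), 4.
  %r = shufflevector <8 x i64> %x, <8 x i64> %y.512, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
  ret <8 x i64> %r
}

With the fold, the backend is free to insert %y itself at 128-bit lane 2 (vinserti32x4 $2) instead of materializing the ymm intermediate and inserting it at 256-bit lane 1 (vinserti64x4 $1), which is exactly the instruction change visible in the hunks above.
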
@@ -7284,9 +7284,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm7
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm7, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7299,8 +7299,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <13,u,u,u,u,u,128,14,u,u,u,u,u,128,15,u,u,u,u,u,128,16,u,u,u,u,u,128,17,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm26
-; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm29
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm24
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm12
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
@@ -7319,9 +7319,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm11
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm7, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm3
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -7336,8 +7336,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[18],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm6[23],zero,ymm6[21,22,23,26],zero,ymm6[24],zero,ymm6[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm6[18,19,20,21],zero,ymm6[19],zero,ymm6[25,26,27,22],zero,ymm6[20],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm7[23],zero,ymm7[21,22,23,26],zero,ymm7[24],zero,ymm7[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm7[18,19,20,21],zero,ymm7[19],zero,ymm7[25,26,27,22],zero,ymm7[20],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm26
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm15, %ymm0
@@ -7347,34 +7348,34 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm30
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm21
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vporq %xmm2, %xmm3, %xmm22
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm20
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm24
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm17
-; AVX512F-SLOW-NEXT:    vporq %xmm2, %xmm3, %xmm31
+; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm3
@@ -7385,47 +7386,46 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm15
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm7, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm7, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, %ymm13
 ; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm28
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm25
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm3
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm12, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm25
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
 ; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm4
 ; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm5
 ; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
 ; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,0,1],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm0
@@ -7439,18 +7439,19 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm21
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u>
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[27],zero,zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm1
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
@@ -7462,76 +7463,76 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm15, %ymm3
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm15, %ymm15
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm20
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm23
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm12
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm23
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm18
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm27
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm27
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm31
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm22
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm22, %zmm22
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm15
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm31[0,1,0,1],zmm15[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm15[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm13
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, %ymm15
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm29
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7542,28 +7543,27 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
 ; AVX512F-SLOW-NEXT:    vpandnq %ymm15, %ymm25, %ymm15
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm6, %zmm6
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3],xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm25 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm2[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm25 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm30[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm24[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm10[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,2,3,3,6,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm9[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm13[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm19[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm20[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm19[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm17[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
@@ -7582,7 +7582,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vporq %zmm9, %zmm8, %zmm8
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm23[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
 ; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm11
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
@@ -7594,7 +7594,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vporq %zmm0, %zmm9, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm23[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm24[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm17
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm17
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
@@ -7615,9 +7615,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm20 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm20 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm31[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
@@ -7638,7 +7637,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm10[1,1,0,0,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm31[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm15 = mem[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm29[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm18 = mem[2,3,2,3]
@@ -7708,31 +7708,32 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $1448, %rsp # imm = 0x5A8
+; AVX512F-FAST-NEXT:    subq $1432, %rsp # imm = 0x598
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm13, %ymm16
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm10
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero,ymm15[27],zero,ymm15[25]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero,zero,zero,ymm8[27],zero,ymm8[25]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm1
@@ -7744,293 +7745,294 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,zero,zero,ymm0[18]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm26
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm29
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm31
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <13,u,u,u,u,u,128,14,u,u,u,u,u,128,15,u,u,u,u,u,128,16,u,u,u,u,u,128,17,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[13,u,u,u,u,u],zero,ymm1[14,u,u,u,u,u],zero,ymm1[15,u,u,u,u,u],zero,ymm1[16,u,u,u,u,u],zero,ymm1[17,u,u,u]
 ; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm22
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm27
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm7, %xmm4
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm4, %xmm25
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm3, %xmm21
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm12
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm12, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm12, %xmm29
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm20
-; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm7, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,ymm14[14],zero,zero,zero,zero,zero,zero,ymm14[15],zero,zero,zero,zero,zero,zero,ymm14[16],zero,zero,zero,zero,zero,zero,ymm14[17],zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm7, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,ymm10[14],zero,zero,zero,zero,zero,zero,ymm10[15],zero,zero,zero,zero,zero,zero,ymm10[16],zero,zero,zero,zero,zero,zero,ymm10[17],zero,zero,zero,zero,zero,zero,ymm10[18]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm15[0,1,14],zero,ymm15[12,13,0,1,14,15],zero,ymm15[3,12,13,2,3,16],zero,ymm15[30,31,28,29,16,17],zero,ymm15[31,18,19,28,29,18],zero
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm7, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,ymm11[14],zero,zero,zero,zero,zero,zero,ymm11[15],zero,zero,zero,zero,zero,zero,ymm11[16],zero,zero,zero,zero,zero,zero,ymm11[17],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm7
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm7, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm5, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm5
+; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm12, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,ymm10[14],zero,zero,zero,zero,zero,zero,ymm10[15],zero,zero,zero,zero,zero,zero,ymm10[16],zero,zero,zero,zero,zero,zero,ymm10[17],zero,zero,zero,zero,zero,zero,ymm10[18]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm10, %ymm18
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm8[0,1,14],zero,ymm8[12,13,0,1,14,15],zero,ymm8[3,12,13,2,3,16],zero,ymm8[30,31,28,29,16,17],zero,ymm8[31,18,19,28,29,18],zero
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm10
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm9, %xmm4
+; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm2
 ; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm6
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero,zero,zero,ymm1[27],zero,ymm1[25]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[18],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22],zero,ymm7[20]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27>
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm2[18,19,20,21],zero,ymm2[19],zero,ymm2[25,26,27,22],zero,ymm2[20],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [2,2,3,3,2,2,3,3]
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512F-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm7, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm4, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm3, %zmm22
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
 ; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm8, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm11, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
 ; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm15[0,1,2,3],zmm8[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm28
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm21
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm10, %xmm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm10, %xmm25
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm30
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm13, %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm6
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm13[8],xmm6[8],xmm13[9],xmm6[9],xmm13[10],xmm6[10],xmm13[11],xmm6[11],xmm13[12],xmm6[12],xmm13[13],xmm6[13],xmm13[14],xmm6[14],xmm13[15],xmm6[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm25, %zmm27
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm10
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm11
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3],xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm14, %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm14[8],xmm5[8],xmm14[9],xmm5[9],xmm14[10],xmm5[10],xmm14[11],xmm5[11],xmm14[12],xmm5[12],xmm14[13],xmm5[13],xmm14[14],xmm5[14],xmm14[15],xmm5[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm21, %zmm30
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm3[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm10[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm11
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm10
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,5,6]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm6, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[18],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm0[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm7[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm4[18,19,20,21],zero,ymm4[19],zero,ymm4[25,26,27,22],zero,ymm4[20],zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm7[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm3[18,19,20,21],zero,ymm3[19],zero,ymm3[25,26,27,22],zero,ymm3[20],zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm7
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
 ; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm28, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm26, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29>
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29>
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm13
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm15, %ymm13
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm13[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm14[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm14[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm14
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm14
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm16[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm15, %ymm15
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm2
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm4, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm5, %ymm10, %ymm31
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm4, %ymm11, %ymm31
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm31, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm10, %zmm9
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm11, %zmm9
+; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm12, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
 ; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm15, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm10, %zmm3
-; AVX512F-FAST-NEXT:    vpandq %ymm5, %ymm25, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm11, %zmm3
+; AVX512F-FAST-NEXT:    vpandq %ymm4, %ymm21, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm20, %zmm1
 ; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-FAST-NEXT:    vporq %zmm4, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm18, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm19, %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm4, %zmm4
 ; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-FAST-NEXT:    vporq %zmm5, %zmm4, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
-; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm17, %ymm1
+; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm18, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm13, %zmm1
 ; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-FAST-NEXT:    vporq %zmm5, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
 ; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm30[0,1,0,1,4,5,4,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm27[0,1,0,1,4,5,4,5]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm5, %zmm10
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm30[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm5, %zmm11
 ; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
@@ -8048,23 +8050,23 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
 ; AVX512F-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # zmm16 = zmm0[0,1,0,1],mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[1,1,0,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[1,1,0,0,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,0,1,2,0,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm5, %ymm18
 ; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm0[1,1,0,0,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm5, %ymm17
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
 ; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm12
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
-; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero
 ; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm4
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm6[23],zero,ymm6[23,24,25,26],zero,ymm6[24],zero,ymm6[30,31]
@@ -8083,7 +8085,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm21 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,0]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,0]
@@ -8101,10 +8103,10 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm6, %zmm1
 ; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm29
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm29
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm1 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm1
 ; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm14, %ymm5
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
@@ -8133,16 +8135,17 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 64(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 384(%rax)
-; AVX512F-FAST-NEXT:    addq $1448, %rsp # imm = 0x5A8
+; AVX512F-FAST-NEXT:    addq $1432, %rsp # imm = 0x598
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
 ; AVX512BW-SLOW-LABEL: store_i8_stride7_vf64:
 ; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    subq $152, %rsp
+; AVX512BW-SLOW-NEXT:    subq $216, %rsp
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm14
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %zmm14
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm5
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm20
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm18
 ; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm18[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
@@ -8151,22 +8154,22 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k1
 ; AVX512BW-SLOW-NEXT:    kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k1} = ymm20[u,u,u,14,u,12,u,u,u,u,15,u,13,u,u,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm20[23],zero,zero,zero,zero,ymm20[26],zero,ymm20[24],zero,zero,zero,zero,ymm20[27],zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm19, %ymm18, %ymm3
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm15
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[24,25],zero,ymm15[23],zero,ymm15[21,22,23,26],zero,ymm15[24],zero,ymm15[28,29,26,27]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm12
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm13
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm12[11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
 ; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,2,3,3,4,6,7,7]
 ; AVX512BW-SLOW-NEXT:    movl $676341840, %r10d # imm = 0x28502850
@@ -8176,142 +8179,142 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512BW-SLOW-NEXT:    movabsq $1742999440035548184, %r10 # imm = 0x183060C183060C18
 ; AVX512BW-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r8), %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm1[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r8), %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm0[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm2[23],zero,zmm2[23,24,25,26],zero,zmm2[24],zero,zmm2[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm2[59],zero,zero,zero,zero,zmm2[62],zero,zmm2[60],zero,zero,zero,zero,zmm2[63],zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm1[0,1,2,3],zmm16[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r9), %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm0[0,1,2,3],zmm14[4,5,6,7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[25],zero,zmm3[23],zero,zero,zero,zero,zmm3[26],zero,zmm3[24],zero,zero,zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[59],zero,zero,zero,zero,zmm3[62],zero,zmm3[60],zero,zero,zero,zero,zmm3[63],zero,zmm3[61]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vporq %zmm2, %zmm3, %zmm2
 ; AVX512BW-SLOW-NEXT:    movabsq $6971997760142192736, %r10 # imm = 0x60C183060C183060
 ; AVX512BW-SLOW-NEXT:    kmovq %r10, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k5}
+; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rax), %ymm9
-; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512BW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm1, %ymm3
+; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
+; AVX512BW-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm0, %ymm3
 ; AVX512BW-SLOW-NEXT:    movl $-2130574328, %r10d # imm = 0x81020408
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k3
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm2 {%k3}
-; AVX512BW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm24 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-SLOW-NEXT:    # ymm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm24, %ymm3
+; AVX512BW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-SLOW-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm26, %ymm3
 ; AVX512BW-SLOW-NEXT:    movl $270549121, %r10d # imm = 0x10204081
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm0 {%k6}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm1 {%k6}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rcx), %xmm21
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm21, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm21, %xmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdx), %xmm22
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm22, %xmm4
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm22, %xmm4
+; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm21[8],xmm22[8],xmm21[9],xmm22[9],xmm21[10],xmm22[10],xmm21[11],xmm22[11],xmm21[12],xmm22[12],xmm21[13],xmm22[13],xmm21[14],xmm22[14],xmm21[15],xmm22[15]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %xmm27
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm27, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %xmm30
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm30, %xmm7
-; AVX512BW-SLOW-NEXT:    vpor %xmm5, %xmm7, %xmm5
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm27[8],xmm30[8],xmm27[9],xmm30[9],xmm27[10],xmm30[10],xmm27[11],xmm30[11],xmm27[12],xmm30[12],xmm27[13],xmm30[13],xmm27[14],xmm30[14],xmm27[15],xmm30[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm5[0,1,0,1,4,5,4,5]
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm11 {%k5}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm23
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm23, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm28
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm28, %xmm7
-; AVX512BW-SLOW-NEXT:    vpor %xmm5, %xmm7, %xmm5
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm28[8],xmm23[8],xmm28[9],xmm23[9],xmm28[10],xmm23[10],xmm28[11],xmm23[11],xmm28[12],xmm23[12],xmm28[13],xmm23[13],xmm28[14],xmm23[14],xmm28[15],xmm23[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm1
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %xmm28
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm28, %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm10, %xmm8
+; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm28[8],xmm10[8],xmm28[9],xmm10[9],xmm28[10],xmm10[10],xmm28[11],xmm10[11],xmm28[12],xmm10[12],xmm28[13],xmm10[13],xmm28[14],xmm10[14],xmm28[15],xmm10[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm7[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm12 {%k5}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r9), %xmm25
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm25, %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r8), %xmm27
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm27, %xmm8
+; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm27[8],xmm25[8],xmm27[9],xmm25[9],xmm27[10],xmm25[10],xmm27[11],xmm25[11],xmm27[12],xmm25[12],xmm27[13],xmm25[13],xmm27[14],xmm25[14],xmm27[15],xmm25[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    movl $67637280, %r10d # imm = 0x4081020
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k4
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm7
-; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm1, %ymm8
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm7 {%k4}
+; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-SLOW-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm0, %ymm17
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm8 {%k4}
 ; AVX512BW-SLOW-NEXT:    movl $1082196484, %r10d # imm = 0x40810204
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k7
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm1, %ymm8
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm5 {%k7}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm0, %ymm17
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm7 {%k7}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
 ; AVX512BW-SLOW-NEXT:    movabsq $-8714997200177740921, %r10 # imm = 0x870E1C3870E1C387
 ; AVX512BW-SLOW-NEXT:    kmovq %r10, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm11 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rcx), %xmm31
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm31, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm12 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
-; AVX512BW-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm31[0],xmm8[1],xmm31[1],xmm8[2],xmm31[2],xmm8[3],xmm31[3],xmm8[4],xmm31[4],xmm8[5],xmm31[5],xmm8[6],xmm31[6],xmm8[7],xmm31[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
+; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rsi), %xmm29
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm29, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm10, %xmm4
-; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm10[0],xmm29[0],xmm10[1],xmm29[1],xmm10[2],xmm29[2],xmm10[3],xmm29[3],xmm10[4],xmm29[4],xmm10[5],xmm29[5],xmm10[6],xmm29[6],xmm10[7],xmm29[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm2[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm29, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %xmm30
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm30, %xmm5
+; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm30[0],xmm29[0],xmm30[1],xmm29[1],xmm30[2],xmm29[2],xmm30[3],xmm29[3],xmm30[4],xmm29[4],xmm30[5],xmm29[5],xmm30[6],xmm29[6],xmm30[7],xmm29[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm3
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    movabsq $871499720017774092, %r10 # imm = 0xC183060C183060C
 ; AVX512BW-SLOW-NEXT:    kmovq %r10, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm13 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %xmm25
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm25, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %xmm26
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm26, %xmm2
-; AVX512BW-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm26[0],xmm25[0],xmm26[1],xmm25[1],xmm26[2],xmm25[2],xmm26[3],xmm25[3],xmm26[4],xmm25[4],xmm26[5],xmm25[5],xmm26[6],xmm25[6],xmm26[7],xmm25[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm0[0,1,0,1,4,5,4,5]
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rax), %ymm4
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm1, %ymm1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm11 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %xmm23
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm23, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %xmm24
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm24, %xmm3
+; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm24[0],xmm23[0],xmm24[1],xmm23[1],xmm24[2],xmm23[2],xmm24[3],xmm23[3],xmm24[4],xmm23[4],xmm24[5],xmm23[5],xmm24[6],xmm23[6],xmm24[7],xmm23[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm2
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rax), %ymm31
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm0, %ymm0
 ; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm3 {%k7}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm3 {%k7}
 ; AVX512BW-SLOW-NEXT:    movl $135274560, %eax # imm = 0x8102040
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k7
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm17, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm2 {%k7}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm16, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm2 {%k7}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm0
 ; AVX512BW-SLOW-NEXT:    movabsq $8133997386832558192, %rax # imm = 0x70E1C3870E1C3870
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm13 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %ymm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero,ymm7[27],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm19, %ymm6, %ymm3
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm11 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %ymm6
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero,ymm6[27],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm19, %ymm4, %ymm3
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,1,1,4,4,5,5]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k1} = ymm7[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k1} = ymm6[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm5
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdx), %zmm2
@@ -8327,150 +8330,151 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vporq %zmm19, %zmm0, %zmm19
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm19 {%k5}
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %ymm5
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm16, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm14, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm0[18],zero,zmm0[20,21,20,21],zero,zmm0[19],zero,zmm0[19,20,21,22],zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm0[55],zero,zmm0[55,56,57,58],zero,zmm0[56],zero,zmm0[62,63]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %ymm16
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm14, %zmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm14 = zmm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm14[20],zero,zmm14[18],zero,zero,zero,zero,zmm14[21],zero,zmm14[19],zero,zero,zero,zero,zmm14[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm14[57],zero,zmm14[55],zero,zero,zero,zero,zmm14[58],zero,zmm14[56],zero,zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vporq %zmm0, %zmm14, %zmm14
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm24, %ymm0
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm14, %ymm24
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm24 {%k6}
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %ymm14
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm17, %zmm17
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm17 = zmm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm17[20],zero,zmm17[18],zero,zero,zero,zero,zmm17[21],zero,zmm17[19],zero,zero,zero,zero,zmm17[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm17[57],zero,zmm17[55],zero,zero,zero,zero,zmm17[58],zero,zmm17[56],zero,zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
+; AVX512BW-SLOW-NEXT:    vporq %zmm0, %zmm17, %zmm0
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm26, %ymm17
+; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm26
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm26 {%k6}
 ; AVX512BW-SLOW-NEXT:    movl $33818640, %eax # imm = 0x2040810
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm0, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm14 {%k5}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm14, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm1, %ymm17
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm0 {%k5}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k2
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm19 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm4[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7]
 ; AVX512BW-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 4-byte Reload
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k2} = ymm7[u,u,u,14,u,12,u,u,u,u,15,u,13,u,u,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm30[0],xmm27[0],xmm30[1],xmm27[1],xmm30[2],xmm27[2],xmm30[3],xmm27[3],xmm30[4],xmm27[4],xmm30[5],xmm27[5],xmm30[6],xmm27[6],xmm30[7],xmm27[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k2} = ymm6[u,u,u,14,u,12,u,u,u,u,15,u,13,u,u,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm28[0],xmm10[1],xmm28[1],xmm10[2],xmm28[2],xmm10[3],xmm28[3],xmm10[4],xmm28[4],xmm10[5],xmm28[5],xmm10[6],xmm28[6],xmm10[7],xmm28[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm2[11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm24 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm24 = ymm24[0,2,3,3,4,6,7,7]
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm24, %ymm14 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm14, %zmm14
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm17 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm17 = ymm17[0,2,3,3,4,6,7,7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm1, %zmm1
 ; AVX512BW-SLOW-NEXT:    movabsq $3485998880071096368, %rax # imm = 0x3060C183060C1830
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm14 {%k2}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm28[0],xmm23[0],xmm28[1],xmm23[1],xmm28[2],xmm23[2],xmm28[3],xmm23[3],xmm28[4],xmm23[4],xmm28[5],xmm23[5],xmm28[6],xmm23[6],xmm28[7],xmm23[7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k2}
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm27[0],xmm25[0],xmm27[1],xmm25[1],xmm27[2],xmm25[2],xmm27[3],xmm25[3],xmm27[4],xmm25[4],xmm27[5],xmm25[5],xmm27[6],xmm25[6],xmm27[7],xmm25[7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm17, %ymm17
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm16, %ymm16
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm0 {%k7}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm16[27],zero,zero,zero,zero,ymm16[30],zero,ymm16[28],zero,zero,zero,zero,ymm16[31],zero
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm0 {%k7}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vporq %ymm17, %ymm21, %ymm17
-; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Reload
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm21, %ymm21
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm21, %ymm17 {%k3}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm17, %zmm0
+; AVX512BW-SLOW-NEXT:    vporq %ymm16, %ymm17, %ymm16
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Reload
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm17, %ymm17
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm17, %ymm16 {%k3}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm16, %zmm0
 ; AVX512BW-SLOW-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm14 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm17, %ymm2, %ymm2
-; AVX512BW-SLOW-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm31[8],xmm8[8],xmm31[9],xmm8[9],xmm31[10],xmm8[10],xmm31[11],xmm8[11],xmm31[12],xmm8[12],xmm31[13],xmm8[13],xmm31[14],xmm8[14],xmm31[15],xmm8[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm21, %ymm7, %ymm6
-; AVX512BW-SLOW-NEXT:    vpor %ymm1, %ymm6, %ymm1
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm29[8],xmm10[8],xmm29[9],xmm10[9],xmm29[10],xmm10[10],xmm29[11],xmm10[11],xmm29[12],xmm10[12],xmm29[13],xmm10[13],xmm29[14],xmm10[14],xmm29[15],xmm10[15]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm16, %ymm3, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm6, %ymm0
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm29[8],xmm30[8],xmm29[9],xmm30[9],xmm29[10],xmm30[10],xmm29[11],xmm30[11],xmm29[12],xmm30[12],xmm29[13],xmm30[13],xmm29[14],xmm30[14],xmm29[15],xmm30[15]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
 ; AVX512BW-SLOW-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k3}
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm2, %ymm16, %ymm6
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm6
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
 ; AVX512BW-SLOW-NEXT:    vpor %ymm6, %ymm5, %ymm5
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
 ; AVX512BW-SLOW-NEXT:    movl $541098242, %eax # imm = 0x20408102
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm5 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512BW-SLOW-NEXT:    vpermw %ymm4, %ymm7, %ymm4
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm16 = xmm26[8],xmm25[8],xmm26[9],xmm25[9],xmm26[10],xmm25[10],xmm26[11],xmm25[11],xmm26[12],xmm25[12],xmm26[13],xmm25[13],xmm26[14],xmm25[14],xmm26[15],xmm25[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm16 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm16, %zmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm31, %ymm5 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512BW-SLOW-NEXT:    vpermw %ymm31, %ymm10, %ymm14
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm17 = xmm24[8],xmm23[8],xmm24[9],xmm23[9],xmm24[10],xmm23[10],xmm24[11],xmm23[11],xmm24[12],xmm23[12],xmm24[13],xmm23[13],xmm24[14],xmm23[14],xmm24[15],xmm23[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm17 {%k4}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm17, %zmm5
 ; AVX512BW-SLOW-NEXT:    movabsq $4066998693416279096, %rax # imm = 0x3870E1C3870E1C38
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k4
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm1 {%k4}
-; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,1,1,4,4,5,5]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm20[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm18, %ymm3
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm21, %ymm20, %ymm5
-; AVX512BW-SLOW-NEXT:    vpor %ymm3, %ymm5, %ymm3
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm17, %ymm12, %ymm4
-; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm4, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[18],zero,ymm15[18,19,20,21],zero,ymm15[19],zero,ymm15[25,26,27,22],zero,ymm15[20],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm12[18],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm0 {%k4}
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm18[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[0,0,1,1,4,4,5,5]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 {%k1} = ymm20[u,u,u,u,5,u,3,u,u,u,u,6,u,4,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm18, %ymm7
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm4, %ymm20, %ymm4
+; AVX512BW-SLOW-NEXT:    vpor %ymm7, %ymm4, %ymm4
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k2}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm16, %ymm15, %ymm5
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm13, %ymm3
+; AVX512BW-SLOW-NEXT:    vpor %ymm5, %ymm3, %ymm3
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[18],zero,ymm15[18,19,20,21],zero,ymm15[19],zero,ymm15[25,26,27,22],zero,ymm15[20],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[18],zero,zero,zero,zero,ymm13[21],zero,ymm13[19],zero,zero,zero,zero,ymm13[22],zero,ymm13[20]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpor %ymm5, %ymm7, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm4 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm2, %ymm7, %ymm2
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm2
-; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm9, %ymm0 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm2, %ymm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm4[18],zero,ymm4[20,21,20,21],zero,ymm4[19],zero,ymm4[19,20,21,22],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[20],zero,ymm5[18],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm5, %ymm3
+; AVX512BW-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm9, %ymm2 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm3, %ymm3
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm4 {%k5}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[20],zero,ymm7[18],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm5 {%k5}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
 ; AVX512BW-SLOW-NEXT:    movabsq $-4357498600088870461, %rax # imm = 0xC3870E1C3870E1C3
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k1}
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm3, 320(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm14, 192(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm13, (%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm11, 256(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm12, 256(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-SLOW-NEXT:    vmovaps %zmm0, 384(%rax)
-; AVX512BW-SLOW-NEXT:    addq $152, %rsp
+; AVX512BW-SLOW-NEXT:    addq $216, %rsp
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
 ;
@@ -8488,8 +8492,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm27[0,1,2,3],zmm19[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm0[23],zero,zmm0[21,22,23,26],zero,zmm0[24],zero,zmm0[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero,zmm0[61],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %ymm26
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm26[0,1,2,3],zmm20[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %ymm25
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm25[0,1,2,3],zmm20[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm1[25],zero,zmm1[23],zero,zero,zero,zero,zmm1[26],zero,zmm1[24],zero,zero,zero,zero,zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm0, %zmm1, %zmm0
@@ -8529,9 +8533,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movl $-2130574328, %r10d # imm = 0x81020408
 ; AVX512BW-FAST-NEXT:    kmovd %r10d, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k1}
-; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
-; AVX512BW-FAST-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm25, %ymm2
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12]
+; AVX512BW-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm26, %ymm2
 ; AVX512BW-FAST-NEXT:    movl $270549121, %r10d # imm = 0x10204081
 ; AVX512BW-FAST-NEXT:    kmovd %r10d, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k3}
@@ -8539,42 +8543,42 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm2
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %xmm11
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm3
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm3
+; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm3, %xmm1
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm31 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %xmm15
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm4
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm15, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm18
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm18, %xmm5
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm15[8],xmm18[8],xmm15[9],xmm18[9],xmm15[10],xmm18[10],xmm15[11],xmm18[11],xmm15[12],xmm18[12],xmm15[13],xmm18[13],xmm15[14],xmm18[14],xmm15[15],xmm18[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm4[0,1,0,1,4,5,4,5]
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm9 {%k2}
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm9 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%r9), %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm13, %xmm4
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %xmm16
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm29 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm16, %xmm5
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm16[8],xmm13[8],xmm16[9],xmm13[9],xmm16[10],xmm13[10],xmm16[11],xmm13[11],xmm16[12],xmm13[12],xmm16[13],xmm13[13],xmm16[14],xmm13[14],xmm16[15],xmm13[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movl $67637280, %r10d # imm = 0x4081020
 ; AVX512BW-FAST-NEXT:    kmovd %r10d, %k4
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
+; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm3, %ymm5
 ; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm28 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
 ; AVX512BW-FAST-NEXT:    # ymm28 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm28, %ymm7
@@ -8583,40 +8587,40 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovd %r10d, %k5
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm0, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm7, %ymm4 {%k5}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm7, %ymm3 {%k5}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    movabsq $-8714997200177740921, %r10 # imm = 0x870E1C3870E1C387
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm9 {%k6}
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm4
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm9 {%k6}
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm5
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %xmm17
-; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm17, %xmm1
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm17[0],xmm4[0],xmm17[1],xmm4[1],xmm17[2],xmm4[2],xmm17[3],xmm4[3],xmm17[4],xmm4[4],xmm17[5],xmm4[5],xmm17[6],xmm4[6],xmm17[7],xmm4[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm17, %xmm2
+; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm17[0],xmm3[0],xmm17[1],xmm3[1],xmm17[2],xmm3[2],xmm17[3],xmm3[3],xmm17[4],xmm3[4],xmm17[5],xmm3[5],xmm17[6],xmm3[6],xmm17[7],xmm3[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm7
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
 ; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm6, %xmm6
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm6[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movabsq $871499720017774092, %r10 # imm = 0xC183060C183060C
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm12 {%k6}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm12 {%k6}
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm7
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm1
+; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm6
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm6, %xmm2
 ; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rax), %ymm29
 ; AVX512BW-FAST-NEXT:    vpermw %ymm29, %ymm0, %ymm0
@@ -8659,43 +8663,43 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm23 = zmm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm23[20],zero,zmm23[18],zero,zero,zero,zero,zmm23[21],zero,zmm23[19],zero,zero,zero,zero,zmm23[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm23[57],zero,zmm23[55],zero,zero,zero,zero,zmm23[58],zero,zmm23[56],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm14, %zmm23, %zmm14
-; AVX512BW-FAST-NEXT:    vpermw %ymm29, %ymm25, %ymm23
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm14, %ymm25
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm23, %ymm25 {%k3}
+; AVX512BW-FAST-NEXT:    vpermw %ymm29, %ymm26, %ymm23
+; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm14, %ymm26
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm23, %ymm26 {%k3}
 ; AVX512BW-FAST-NEXT:    movl $33818640, %eax # imm = 0x2040810
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm29, %ymm23, %ymm23
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm23, %ymm14 {%k2}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm14, %zmm14
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm14, %zmm14
 ; AVX512BW-FAST-NEXT:    movabsq $2033499346708139548, %rax # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm19 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm25, %ymm22, %ymm14
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm26, %ymm22, %ymm14
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm1, %ymm30
 ; AVX512BW-FAST-NEXT:    vporq %ymm14, %ymm30, %ymm14
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm17[8],xmm4[9],xmm17[9],xmm4[10],xmm17[10],xmm4[11],xmm17[11],xmm4[12],xmm17[12],xmm4[13],xmm17[13],xmm4[14],xmm17[14],xmm4[15],xmm17[15]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm4, %xmm4
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm4, %zmm30
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm17[8],xmm3[9],xmm17[9],xmm3[10],xmm17[10],xmm3[11],xmm17[11],xmm3[12],xmm17[12],xmm3[13],xmm17[13],xmm3[14],xmm17[14],xmm3[15],xmm17[15]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm3, %xmm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm30
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm17
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm31
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm31
 ; AVX512BW-FAST-NEXT:    vporq %ymm17, %ymm31, %ymm17
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm3, %zmm17
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm4, %zmm17
 ; AVX512BW-FAST-NEXT:    movabsq $435749860008887046, %rax # imm = 0x60C183060C18306
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm30, %zmm17 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm21, %ymm30
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm20, %ymm31
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm21, %ymm30
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm20, %ymm31
 ; AVX512BW-FAST-NEXT:    vporq %ymm30, %ymm31, %ymm30
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
@@ -8749,35 +8753,35 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm25, %ymm27, %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm26, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb %ymm26, %ymm27, %ymm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm25, %ymm2
 ; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm27[18],zero,ymm27[18,19,20,21],zero,ymm27[19],zero,ymm27[25,26,27,22],zero,ymm27[20],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm26[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm26[18],zero,zero,zero,zero,ymm26[21],zero,ymm26[19],zero,zero,zero,zero,ymm26[22],zero,ymm26[20]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm25[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm25[18],zero,zero,zero,zero,ymm25[21],zero,ymm25[19],zero,zero,zero,zero,ymm25[22],zero,ymm25[20]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm24, %ymm2
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
-; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm24[18,19,20,21],zero,ymm24[19],zero,ymm24[21,20,21,22],zero,ymm24[20],zero,ymm24[22,23]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm3
+; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm24[18,19,20,21],zero,ymm24[19],zero,ymm24[21,20,21,22],zero,ymm24[20],zero,ymm24[22,23]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22],zero,ymm7[20],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm4, %ymm7, %ymm4
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512BW-FAST-NEXT:    vpor %ymm3, %ymm7, %ymm3
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k4}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm1
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm3
 ; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm1 {%k3}
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm3, %ymm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm4[18],zero,ymm4[20,21,20,21],zero,ymm4[19],zero,ymm4[19,20,21,22],zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[20],zero,ymm7[18],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index c250677f9d0a6..08e906dea77b8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -3157,13 +3157,13 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r10), %xmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r10), %xmm12
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm25[8],xmm1[9],xmm25[9],xmm1[10],xmm25[10],xmm1[11],xmm25[11],xmm1[12],xmm25[12],xmm1[13],xmm25[13],xmm1[14],xmm25[14],xmm1[15],xmm25[15]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm5
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r9), %xmm13
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm3
 ; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r8), %xmm14
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm19
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm19
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,0,32,4,5,1,33,2,1,2,34,4,5,3,35,16,17,20,52,20,21,21,53,16,17,22,54,22,21,23,55]
 ; AVX512BW-SLOW-NEXT:    vpermt2w %zmm5, %zmm23, %zmm19
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
@@ -3193,9 +3193,9 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm19, %zmm7 {%k1}
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm4
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm19, %zmm20
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm19, %zmm20
 ; AVX512BW-SLOW-NEXT:    vpermt2w %zmm4, %zmm23, %zmm20
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7]
 ; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm19 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
@@ -3213,9 +3213,9 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm19
 ; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm20, %zmm19 {%k1}
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm4
 ; AVX512BW-SLOW-NEXT:    vpermt2w %zmm0, %zmm23, %zmm4
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
 ; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -3233,9 +3233,9 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm0 {%k1}
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm25[0],xmm1[1],xmm25[1],xmm1[2],xmm25[2],xmm1[3],xmm25[3],xmm1[4],xmm25[4],xmm1[5],xmm25[5],xmm1[6],xmm25[6],xmm1[7],xmm25[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm1
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm2
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
 ; AVX512BW-SLOW-NEXT:    vpermt2w %zmm1, %zmm23, %zmm2
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
@@ -3265,71 +3265,71 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r11), %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%r11), %xmm11
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r11), %xmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r10), %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%r10), %xmm12
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r10), %xmm8
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm5
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%r9), %xmm13
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r9), %xmm9
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%r8), %xmm14
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r8), %xmm10
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm4, %zmm15
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,0,32,4,5,1,33,2,1,2,34,4,5,3,35,16,17,20,52,20,21,21,53,16,17,22,54,22,21,23,55]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm5, %zmm4, %zmm15
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm5, %zmm4, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rcx), %xmm16
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rdx), %xmm17
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm18
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rsi), %xmm19
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rdi), %xmm20
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm6, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = <0,32,u,u,1,33,u,u,2,34,u,u,3,35,u,u,20,52,u,u,21,53,u,u,22,54,u,u,23,55,u,u>
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm18, %zmm10, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%rcx), %xmm12
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdx), %xmm14
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm15
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %xmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rsi), %xmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm18
+; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rdi), %xmm19
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm18[8],xmm16[8],xmm18[9],xmm16[9],xmm18[10],xmm16[10],xmm18[11],xmm16[11],xmm18[12],xmm16[12],xmm18[13],xmm16[13],xmm18[14],xmm16[14],xmm18[15],xmm16[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <0,32,u,u,1,33,u,u,2,34,u,u,3,35,u,u,20,52,u,u,21,53,u,u,22,54,u,u,23,55,u,u>
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm15, %zmm20, %zmm6
 ; AVX512BW-FAST-NEXT:    movw $-21846, %cx # imm = 0xAAAA
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
-; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm15, %zmm6 {%k1}
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm15, %zmm15
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm18 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm18, %zmm18
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm15, %zmm4, %zmm18
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3],xmm17[4],xmm16[4],xmm17[5],xmm16[5],xmm17[6],xmm16[6],xmm17[7],xmm16[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm15, %zmm15
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm20[0],xmm19[0],xmm20[1],xmm19[1],xmm20[2],xmm19[2],xmm20[3],xmm19[3],xmm20[4],xmm19[4],xmm20[5],xmm19[5],xmm20[6],xmm19[6],xmm20[7],xmm19[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm21, %zmm21
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm15, %zmm10, %zmm21
-; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm18, %zmm21 {%k1}
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm12, %zmm12
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm4, %zmm12
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm17[8],xmm16[8],xmm17[9],xmm16[9],xmm17[10],xmm16[10],xmm17[11],xmm16[11],xmm17[12],xmm16[12],xmm17[13],xmm16[13],xmm17[14],xmm16[14],xmm17[15],xmm16[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm20[8],xmm19[8],xmm20[9],xmm19[9],xmm20[10],xmm19[10],xmm20[11],xmm19[11],xmm20[12],xmm19[12],xmm20[13],xmm19[13],xmm20[14],xmm19[14],xmm20[15],xmm19[15]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm13, %zmm13
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm10, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm12, %zmm13 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm11, %zmm6 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm15, %zmm15
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm4, %zmm15
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3],xmm19[4],xmm17[4],xmm19[5],xmm17[5],xmm19[6],xmm17[6],xmm19[7],xmm17[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm21, %zmm21, %zmm21
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm20, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm15, %zmm21 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm8
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm7, %zmm4, %zmm8
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm7, %zmm20, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm8, %zmm9 {%k1}
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm4, %zmm1
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm2, %zmm2
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm10, %zmm2
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm18[0],xmm16[0],xmm18[1],xmm16[1],xmm18[2],xmm16[2],xmm18[3],xmm16[3],xmm18[4],xmm16[4],xmm18[5],xmm16[5],xmm18[6],xmm16[6],xmm18[7],xmm16[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm20, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm1, %zmm2 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, 192(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, 128(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
index c2b36331e9df0..65be0085cac25 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -571,7 +571,7 @@ define <64 x i8> @shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512F-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_49_51_55_57_61_63_67_69_73_75_79_81_85_87_91_93_97_99_103_105_109_111_115_117_121_123_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -598,7 +598,7 @@ define <64 x i8> @shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_
 ; AVX512BW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512BW-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512BW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_49_51_55_57_61_63_67_69_73_75_79_81_85_87_91_93_97_99_103_105_109_111_115_117_121_123_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -625,7 +625,7 @@ define <64 x i8> @shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_
 ; AVX512DQ-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512DQ-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_01_03_07_09_13_15_19_21_25_27_31_33_37_39_43_45_49_51_55_57_61_63_67_69_73_75_79_81_85_87_91_93_97_99_103_105_109_111_115_117_121_123_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -662,7 +662,7 @@ define <64 x i8> @shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_49_53_55_59_61_65_67_71_73_77_79_83_85_89_91_95_97_101_103_107_109_113_115_119_121_125_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -689,7 +689,7 @@ define <64 x i8> @shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_49_53_55_59_61_65_67_71_73_77_79_83_85_89_91_95_97_101_103_107_109_113_115_119_121_125_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -716,7 +716,7 @@ define <64 x i8> @shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_01_05_07_11_13_17_19_23_25_29_31_35_37_41_43_47_49_53_55_59_61_65_67_71_73_77_79_83_85_89_91_95_97_101_103_107_109_113_115_119_121_125_127_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -754,7 +754,7 @@ define <64 x i8> @shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_50_52_56_58_62_64_68_70_74_76_80_82_86_88_92_94_98_100_104_106_110_112_116_118_122_124_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -781,7 +781,7 @@ define <64 x i8> @shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_50_52_56_58_62_64_68_70_74_76_80_82_86_88_92_94_98_100_104_106_110_112_116_118_122_124_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -808,7 +808,7 @@ define <64 x i8> @shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_02_04_08_10_14_16_20_22_26_28_32_34_38_40_44_46_50_52_56_58_62_64_68_70_74_76_80_82_86_88_92_94_98_100_104_106_110_112_116_118_122_124_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -845,7 +845,7 @@ define <64 x i8> @shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_48_52_54_58_60_64_66_70_72_76_78_82_84_88_90_94_96_100_102_106_108_112_114_118_120_124_126_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -872,7 +872,7 @@ define <64 x i8> @shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_48_52_54_58_60_64_66_70_72_76_78_82_84_88_90_94_96_100_102_106_108_112_114_118_120_124_126_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:
@@ -899,7 +899,7 @@ define <64 x i8> @shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_00_04_06_10_12_16_18_22_24_28_30_34_36_40_42_46_48_52_54_58_60_64_66_70_72_76_78_82_84_88_90_94_96_100_102_106_108_112_114_118_120_124_126_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u_u:

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
index 106059017e54c..2ab13bbb15c56 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
@@ -332,47 +332,45 @@ define <64 x i8> @f3(ptr %p0) {
 ;
 ; AVX512F-LABEL: f3:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,0,4,6,10,12,128,128,128,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,128,128,128,128,128,0,2,6,8,12,14>
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,128,128,128,128,0,4,6,10,12,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = <2,4,8,10,14,128,128,128,128,128,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-NEXT:    vpor %xmm4, %xmm3, %xmm3
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,0,2,6,8,12,14,2,4,8,10,14,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $226, %ymm2, %ymm4, %ymm3
-; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
-; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-NEXT:    vextracti32x4 $2, %zmm2, %xmm2
-; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm5
-; AVX512F-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
-; AVX512F-NEXT:    vmovdqa 160(%rdi), %xmm6
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-NEXT:    vmovdqa 176(%rdi), %xmm6
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vmovdqa 160(%rdi), %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,0,4,6,10,12,128,128,128,128,128,128>
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa 176(%rdi), %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,128,128,128,128,128,0,2,6,8,12,14>
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vpternlogq $216, %ymm4, %ymm5, %ymm0
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,0,2,6,8,12,14,2,4,8,10,14,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $216, %ymm5, %ymm2, %ymm0
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,128,128,0,4,6,10,12,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
+; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,4,8,10,14,128,128,128,128,128,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
+; AVX512F-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3,4],xmm2[5,6,7]
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512F-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $226, %ymm1, %ymm5, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: f3:
@@ -405,8 +403,6 @@ define <64 x i8> @f3(ptr %p0) {
 ; AVX512BW-NEXT:    vmovdqa 96(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
 ; AVX512BW-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
 ; AVX512BW-NEXT:    vmovdqa 160(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
 ; AVX512BW-NEXT:    vmovdqa 176(%rdi), %xmm4

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
index 850eb01faf78a..3d9d30e107db5 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
@@ -2855,11 +2855,11 @@ define void @vec384_v48i8_to_v16i24_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,0,u,1,1,u,2,2,u,3,3,u,4,4,u,5>
-; AVX512BW-NEXT:    vpermw %ymm0, %ymm2, %ymm0
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,0,u,1,1,u,2,2,u,3,3,u,4,4,u,5>
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[11],zero,zero,xmm0[12],zero,zero,xmm0[13],zero,zero,xmm0[14],zero,zero,xmm0[15],zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3091,12 +3091,12 @@ define void @vec384_v48i8_to_v8i48_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bia
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,u,u,1,u,u,2,u,u,3,u,u,4,u,u,5>
-; AVX512BW-NEXT:    vpermw %ymm0, %ymm2, %ymm0
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpermw %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3333,11 +3333,11 @@ define void @vec384_v48i8_to_v4i96_factor12(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3456,11 +3456,11 @@ define void @vec384_v48i8_to_v3i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3740,10 +3740,10 @@ define void @vec384_v24i16_to_v12i32_factor2(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512BW-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3908,7 +3908,7 @@ define void @vec384_v24i16_to_v8i48_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -3922,7 +3922,7 @@ define void @vec384_v24i16_to_v8i48_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermt2w %ymm0, %ymm1, %ymm2
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13],zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4053,9 +4053,9 @@ define void @vec384_v24i16_to_v6i64_factor4(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9],zero,zero,zero,zero,zero,zero,xmm0[10,11],zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -4222,7 +4222,7 @@ define void @vec384_v24i16_to_v4i96_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4236,7 +4236,7 @@ define void @vec384_v24i16_to_v4i96_factor6(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermt2w %ymm0, %ymm1, %ymm2
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4403,7 +4403,7 @@ define void @vec384_v24i16_to_v3i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4417,7 +4417,7 @@ define void @vec384_v24i16_to_v3i128_factor8(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermt2w %ymm0, %ymm1, %ymm2
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4879,7 +4879,7 @@ define void @vec384_v12i32_to_v4i96_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4893,7 +4893,7 @@ define void @vec384_v12i32_to_v4i96_factor3(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vpexpandd %ymm0, %ymm1 {%k1} {z}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -5048,7 +5048,7 @@ define void @vec384_v12i32_to_v3i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -5062,7 +5062,7 @@ define void @vec384_v12i32_to_v3i128_factor4(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vpexpandd %ymm0, %ymm1 {%k1} {z}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -5391,7 +5391,7 @@ define void @vec384_v6i64_to_v3i128_factor2(ptr %in.vec.base.ptr, ptr %in.vec.bi
 ; AVX512BW-NEXT:    vpexpandq %ymm0, %ymm1 {%k1} {z}
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -6106,7 +6106,7 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512F-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -6145,7 +6145,7 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
 ; AVX512BW-SLOW-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
 ; AVX512BW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512BW-SLOW-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]

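The recurring change in the zero_extend_vector_inreg diffs above is vinserti64x4 $1, %ymm..., %zmm... becoming vinserti32x4 $2, %xmm..., %zmm...: the 128-bit payload is now inserted directly into 128-bit lane 2 of the zmm rather than first being widened into a ymm whose upper half is undefined. A hedged C intrinsics sketch of the before/after shapes (names are illustrative; assumes an AVX-512F target):

#include <immintrin.h>

__m512i via_widen(__m512i acc, __m128i x) {
  __m256i wide = _mm256_castsi128_si256(x); // bits 128..255 of wide undefined
  return _mm512_inserti64x4(acc, wide, 1);  // old shape: writes bits 256..511
}

__m512i direct(__m512i acc, __m128i x) {
  return _mm512_inserti32x4(acc, x, 2);     // new shape: writes bits 256..383
}

The two shapes agree on bits 0..383 and differ only in bits 384..511, which the old code filled with the undefined upper half of the widened ymm; since those lanes carry no defined value in these vec384 tests, the narrower insert is sound and the separate widening step disappears.
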
diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 869068fad0888..21224a088db94 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -2649,7 +2649,7 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2793,7 +2793,7 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2944,7 +2944,7 @@ define void @vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8(ptr %in.v
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3088,7 +3088,7 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.v
 ; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3238,7 +3238,7 @@ define void @vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3384,7 +3384,7 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vmovdqu8 %ymm0, %ymm2 {%k1}
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -3676,7 +3676,7 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,xmm0[0,1],zero,zero,xmm0[0,1],zero,zero,xmm0[0,1],zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -3866,7 +3866,7 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -3880,7 +3880,7 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4093,7 +4093,7 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4107,7 +4107,7 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4299,7 +4299,7 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4313,7 +4313,7 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4451,7 +4451,7 @@ define void @vec384_i16_widen_to_i128_factor8_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
 ; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3,4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -4774,7 +4774,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4786,10 +4786,10 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,0,15]
 ; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4979,7 +4979,7 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3]
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4991,10 +4991,10 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,0]
 ; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6],ymm2[7]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -5407,7 +5407,7 @@ define void @vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512BW-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 5050327a89fda..fed93faa47b3f 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -2095,13 +2095,13 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ; AVX512BW-LABEL: vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero
-; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15]
+; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm1 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,15,3,4,15,6,7,15,9,10,15,12,13,15]
 ; AVX512BW-NEXT:    vpbroadcastb (%rdi), %xmm2
-; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero,xmm0[0],zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2219,13 +2219,13 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
-; AVX512BW-NEXT:    vpbroadcastd %xmm0, %ymm0
+; AVX512BW-NEXT:    vpbroadcastd %xmm0, %ymm2
 ; AVX512BW-NEXT:    movl $286331153, %eax # imm = 0x11111111
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2347,13 +2347,13 @@ define void @vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8(ptr %in.e
 ; AVX512BW-LABEL: vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,15,6,7,8,9,10,15,12,13,14]
+; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm1 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,15,6,7,8,9,10,15,12,13,14]
 ; AVX512BW-NEXT:    vpbroadcastb (%rdi), %xmm2
-; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2471,13 +2471,13 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.e
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512BW-NEXT:    vpbroadcastq %xmm0, %ymm2
 ; AVX512BW-NEXT:    movl $16843009, %eax # imm = 0x1010101
 ; AVX512BW-NEXT:    kmovd %eax, %k1
-; AVX512BW-NEXT:    vmovdqu8 %ymm0, %ymm1 {%k1}
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2599,13 +2599,13 @@ define void @vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512BW-LABEL: vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[15,0,1,2,3,4,5,6,7,8,9,10,15,12,13,14]
+; AVX512BW-NEXT:    vpalignr {{.*#+}} xmm1 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],xmm0[0]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[15,0,1,2,3,4,5,6,7,8,9,10,15,12,13,14]
 ; AVX512BW-NEXT:    vpbroadcastb (%rdi), %xmm2
-; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2731,7 +2731,7 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512BW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-NEXT:    vzeroupper
