[llvm] 37bc62e - [X86] lowerShuffleAsLanePermuteAndRepeatedMask - retain the per-lane undef elements and don't just copy the repeated mask

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 29 03:04:04 PST 2023


Author: Simon Pilgrim
Date: 2023-01-29T11:03:47Z
New Revision: 37bc62ed0a24303aa572155009358b8937ab8b4c

URL: https://github.com/llvm/llvm-project/commit/37bc62ed0a24303aa572155009358b8937ab8b4c
DIFF: https://github.com/llvm/llvm-project/commit/37bc62ed0a24303aa572155009358b8937ab8b4c.diff

LOG: [X86] lowerShuffleAsLanePermuteAndRepeatedMask - retain the per-lane undef elements and don't just copy the repeated mask

lowerShuffleAsLanePermuteAndRepeatedMask expands a shuffle from shuffle(x, y, mask) to shuffle(shuffle(x, y, lanemask1), shuffle(x, y, lanemask2), repeatedinlanemask).

However, we weren't making use of the fact that elements of the original mask might be undef: instead of applying the entire repeatedinlanemask to every lane, we can keep an element undef in the new mask whenever the original mask never demanded that element.
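
As an illustration, here is a minimal standalone C++ sketch of just the mask-construction step (the real code lives in lowerShuffleAsLanePermuteAndRepeatedMask and operates on LLVM's shuffle-mask vectors; the helper name, containers and concrete mask values below are hypothetical and only for illustration):

// Replicate the per-lane RepeatMask across every lane, but keep any element
// undef (-1) that the original Mask never demanded, rather than blindly
// copying the repeated in-lane value into it.
#include <cassert>
#include <vector>

std::vector<int> buildRepeatedMask(const std::vector<int> &Mask,
                                   const std::vector<int> &RepeatMask) {
  const int NumElts = static_cast<int>(Mask.size());
  const int NumLaneElts = static_cast<int>(RepeatMask.size());
  std::vector<int> NewMask(NumElts);
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] < 0) {   // element was never demanded - retain undef
      NewMask[i] = -1;
      continue;
    }
    NewMask[i] = RepeatMask[i % NumLaneElts];
  }
  return NewMask;
}

int main() {
  // Two 4-element lanes; the second lane only demands its even slots,
  // so the odd slots may stay undef after the rewrite.
  std::vector<int> Mask       = {1, 3, 5, 7, 5, -1, 7, -1};
  std::vector<int> RepeatMask = {1, 3, 1, 3};
  std::vector<int> NewMask = buildRepeatedMask(Mask, RepeatMask);
  // NewMask is {1, 3, 1, 3, 1, -1, 1, -1} rather than the fully
  // repeated {1, 3, 1, 3, 1, 3, 1, 3}.
  assert(NewMask[5] == -1 && NewMask[7] == -1);
  return 0;
}

The simpler per-lane masks then let later combines use narrower (e.g. xmm instead of ymm) shuffles and cheaper blends, as the updated test checks below show.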

Yet another improvement addressing regressions from D127115

Differential Revision: https://reviews.llvm.org/D142536

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6884ca1e776ff..2df82b5976973 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -17432,6 +17432,10 @@ static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
     return SDValue();
 
   for (int i = 0; i != NumElts; ++i) {
+    if (Mask[i] < 0) {
+      NewMask[i] = -1;
+      continue;
+    }
     NewMask[i] = RepeatMask[i % NumLaneElts];
     if (NewMask[i] < 0)
       continue;

diff  --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index 1a434a1391bde..5ed68056069e5 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -2433,10 +2433,10 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm0
-; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
@@ -2449,10 +2449,10 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX512F-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX512F-NEXT:    vpbroadcastb %xmm0, %ymm0
-; AVX512F-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512F-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
@@ -2465,10 +2465,10 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512DQ-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX512DQ-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512DQ-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512DQ-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX512DQ-NEXT:    vpbroadcastb %xmm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
-; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
@@ -2481,12 +2481,12 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT:    vpbroadcastb %xmm0, %ymm1
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpbroadcastb %xmm0, %ymm0
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512BW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
@@ -2705,7 +2705,7 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-NEXT:    vpbroadcastd %xmm0, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
@@ -2975,7 +2975,7 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.v
 ; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpbroadcastb %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
@@ -3254,7 +3254,7 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
@@ -4271,7 +4271,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-NEXT:    vpbroadcastd %xmm0, %xmm0
 ; AVX2-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0

diff  --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index c451ff88a89fc..ee53111c7a895 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1922,7 +1922,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX2-NEXT:    vpbroadcastb (%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
@@ -1935,7 +1935,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512F-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpbroadcastb (%rdi), %ymm1
 ; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
 ; AVX512F-NEXT:    vpbroadcastb (%rdi), %ymm1
@@ -1950,7 +1950,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512DQ-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24:
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512DQ-NEXT:    vpbroadcastb (%rdi), %ymm1
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
 ; AVX512DQ-NEXT:    vpbroadcastb (%rdi), %ymm1
@@ -1965,7 +1965,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX512BW-LABEL: vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa 48(%rdi), %xmm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpbroadcastb (%rdi), %ymm1
 ; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
 ; AVX512BW-NEXT:    vpbroadcastb (%rdi), %ymm1
@@ -2157,7 +2157,7 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
@@ -2390,7 +2390,7 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.e
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
@@ -2627,7 +2627,7 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
 ; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
@@ -2843,7 +2843,7 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpbroadcastw (%rdi), %xmm0
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,0,0,0,4,5,6,7,8,8,8,8,12,13,14,15]
 ; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
@@ -3468,7 +3468,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa %ymm0, 32(%rdx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index f51e3bb074006..49383d8aba42f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -1600,36 +1600,27 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm12[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
 ; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],ymm5[2,3]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm6[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
 ; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[0,1],ymm5[0,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4,5,6],ymm11[7],ymm2[8,9],ymm11[10],ymm2[11,12,13,14],ymm11[15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm13
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5],ymm12[6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm9[2],ymm5[3,4,5,6],ymm9[7],ymm5[8,9],ymm9[10],ymm5[11,12,13,14],ymm9[15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
@@ -1641,84 +1632,70 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm7[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm6[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm12[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1,2,3,4],ymm10[5,6],ymm13[7],ymm10[8],ymm13[9,10,11,12],ymm10[13,14],ymm13[15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm13[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm15[1,2],xmm2[3],xmm15[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7],ymm10[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm10[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3],xmm2[4],xmm9[5,6],xmm2[7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm12[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm10[1,2,3,4],ymm9[5,6],ymm10[7],ymm9[8],ymm10[9,10,11,12],ymm9[13,14],ymm10[15]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0,1,2],ymm2[3,4,5,6,7],ymm9[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1,2],xmm10[3],xmm11[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3,4],xmm9[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm2[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2,3],xmm4[4],xmm10[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm6[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm11[1,2,3,4],ymm10[5],ymm11[6,7],ymm10[8],ymm11[9,10,11,12],ymm10[13],ymm11[14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm4, %ymm10, %ymm4
+; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm6[1,2,3,4],ymm1[5],ymm6[6,7],ymm1[8],ymm6[9,10,11,12],ymm1[13],ymm6[14,15]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[1,1,2,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm2, %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm11, %ymm1, %ymm4, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,1,0,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2,3],xmm2[4],xmm6[5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rsi)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, (%r9)
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
@@ -1738,33 +1715,27 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm7
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
 ; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],ymm5[2,3]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[0,1],ymm5[0,1]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4,5,6],ymm12[7],ymm2[8,9],ymm12[10],ymm2[11,12,13,14],ymm12[15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm15[2],xmm13[3],xmm15[4,5],xmm13[6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5],ymm13[6,7]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm9[2],ymm5[3,4,5,6],ymm9[7],ymm5[8,9],ymm9[10],ymm5[11,12,13,14],ymm9[15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5],ymm9[6,7]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
@@ -1775,72 +1746,64 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm7[4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm6[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm12[u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1,2,3,4],ymm10[5,6],ymm13[7],ymm10[8],ymm13[9,10,11,12],ymm10[13,14],ymm13[15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,2,3]
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm14[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm15[0],xmm2[1,2],xmm15[3],xmm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7],ymm10[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm10[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3],xmm2[4],xmm9[5,6],xmm2[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm12[u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm10[1,2,3,4],ymm9[5,6],ymm10[7],ymm9[8],ymm10[9,10,11,12],ymm9[13,14],ymm10[15]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0,1,2],ymm2[3,4,5,6,7],ymm9[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1,2],xmm10[3],xmm11[4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3,4],xmm9[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm6[u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm10[1,2,3,4],ymm7[5],ymm10[6,7],ymm7[8],ymm10[9,10,11,12],ymm7[13],ymm10[14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
 ; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10,11,12],ymm1[13],ymm2[14,15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rsi)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-FAST-NEXT:    vmovdqa %ymm4, (%r9)
 ; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX2-FAST-NEXT:    vzeroupper
@@ -1860,33 +1823,27 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],ymm5[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm11 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[0,1],ymm5[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4,5,6],ymm12[7],ymm2[8,9],ymm12[10],ymm2[11,12,13,14],ymm12[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm15[2],xmm13[3],xmm15[4,5],xmm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm9[2],ymm5[3,4,5,6],ymm9[7],ymm5[8,9],ymm9[10],ymm5[11,12,13,14],ymm9[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5],ymm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
@@ -1897,72 +1854,64 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm10 = ymm7[4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm12 = ymm6[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm13 = ymm12[u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1,2,3,4],ymm10[5,6],ymm13[7],ymm10[8],ymm13[9,10,11,12],ymm10[13,14],ymm13[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm14[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm15[0],xmm2[1,2],xmm15[3],xmm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7],ymm10[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm10[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3],xmm2[4],xmm9[5,6],xmm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm9 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm10 = ymm12[u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm10[1,2,3,4],ymm9[5,6],ymm10[7],ymm9[8],ymm10[9,10,11,12],ymm9[13,14],ymm10[15]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0,1,2],ymm2[3,4,5,6,7],ymm9[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1,2],xmm10[3],xmm11[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3,4],xmm9[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm10 = ymm6[u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm10[1,2,3,4],ymm7[5],ymm10[6,7],ymm7[8],ymm10[9,10,11,12],ymm7[13],ymm10[14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm10, %ymm7, %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10,11,12],ymm1[13],ymm2[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm1, %ymm4, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rsi)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, (%r9)
 ; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
@@ -1971,133 +1920,111 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-LABEL: load_i16_stride6_vf16:
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm7[3],xmm1[4,5],xmm7[6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm9
-; AVX512F-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[0,2,2,1,4,6,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm7[0,3,2,3,4,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm11[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm10[2],ymm2[3,4,5,6],ymm10[7],ymm2[8,9],ymm10[10],ymm2[11,12,13,14],ymm10[15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm13
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm9
+; AVX512F-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm7
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm13
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[0,2,0,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5],ymm12[6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm1[2,1,2,1,6,5,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4,5],xmm6[6],xmm8[7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm11[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4,5,6],ymm8[7],ymm9[8,9],ymm8[10],ymm9[11,12,13,14],ymm8[15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5],ymm9[6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm7[1,1,0,3,5,5,4,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm9[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm10[1,2,3,4],ymm8[5,6],ymm10[7],ymm8[8],ymm10[9,10,11,12],ymm8[13,14],ymm10[15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,1]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm11[0,1,2,3,6,5,6,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[0,0,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3],xmm12[4],xmm13[5,6],xmm12[7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,2,3]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm13[0,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[2,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm15[1,2],xmm2[3],xmm15[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm8[0,1,2],ymm12[3,4,5,6,7],ymm8[8,9,10],ymm12[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm8[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm9[1,2,3,4],ymm2[5,6],ymm9[7],ymm2[8],ymm9[9,10,11,12],ymm2[13,14],ymm9[15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm11[0,1,2,3,7,5,6,5]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,1,1,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4],xmm10[5,6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[3,1,2,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[0,1,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1,2],xmm10[3],xmm11[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,1,2,1,6,5,6,5]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm10[0],ymm2[1,2,3,4],ymm10[5],ymm2[6,7],ymm10[8],ymm2[9,10,11,12],ymm10[13],ymm2[14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm4[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm11, %ymm10
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm7, %ymm10
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm0[0,1,0,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1,2,3],xmm3[4],xmm12[5],xmm3[6,7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3,4],ymm3[5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,1,0,3,5,5,4,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,1,3,3,4,5,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm1[1,2,3,4],ymm7[5],ymm1[6,7],ymm7[8],ymm1[9,10,11,12],ymm7[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm1, %ymm11, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm4, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, (%rdx)
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm7, (%r9)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
@@ -2105,7 +2032,6 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride6_vf16:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
@@ -2118,82 +2044,69 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm9
 ; AVX512F-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],mem[2,3]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm6[2,1,2,1,6,5,6,5]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[0,3,2,3,4,7,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4,5,6],ymm11[7],ymm2[8,9],ymm11[10],ymm2[11,12,13,14],ymm11[15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm14
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,0,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm15[2],xmm13[3],xmm15[4,5],xmm13[6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5],ymm13[6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5],ymm12[6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm16
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm10[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm8[2],ymm9[3,4,5,6],ymm8[7],ymm9[8,9],ymm8[10],ymm9[11,12,13,14],ymm8[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5],ymm9[6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm7[4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm6[0,3,2,3,4,7,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm9[u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm10[1,2,3,4],ymm8[5,6],ymm10[7],ymm8[8],ymm10[9,10,11,12],ymm8[13,14],ymm10[15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,1]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm11[0,1,2,3,6,5,6,4]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3],xmm12[4],xmm13[5,6],xmm12[7]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm8[0,1,2],ymm12[3,4,5,6,7],ymm8[8,9,10],ymm12[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,2,3]
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm14[2,1,2,0,4,5,6,7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm15[0],xmm2[1,2],xmm15[3],xmm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm8[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm9[1,2,3,4],ymm2[5,6],ymm9[7],ymm2[8],ymm9[9,10,11,12],ymm2[13,14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm11[0,1,2,3,7,5,6,5]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4],xmm10[5,6],xmm9[7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[3,1,2,1,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1,2],xmm10[3],xmm11[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,1,2,1,6,5,6,5]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,0,3,4,5,4,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm10[1,2,3,4],ymm7[5],ymm10[6,7],ymm7[8],ymm10[9,10,11,12],ymm7[13],ymm10[14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm7, %ymm11, %ymm10
+; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm7, %ymm10
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
@@ -2202,23 +2115,22 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,14,15,u,u,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm6[1,2,3,4],ymm2[5],ymm6[6,7],ymm2[8],ymm6[9,10,11,12],ymm2[13],ymm6[14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm2, %ymm11, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm4, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, (%rsi)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm9, (%r8)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm7, (%r9)
+; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
@@ -3338,286 +3250,238 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX2-SLOW-LABEL: load_i16_stride6_vf32:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $520, %rsp # imm = 0x208
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    subq $488, %rsp # imm = 0x1E8
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, (%rsp) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, %ymm6
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5,6],ymm4[7],ymm2[8,9],ymm4[10],ymm2[11,12,13,14],ymm4[15]
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm3[2,3],ymm2[2,3]
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm0[2,3],ymm1[2,3]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm0[0,1],ymm1[0,1]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm9[2],ymm5[3,4],ymm9[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm8[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm10[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm7[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, %ymm12
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm13[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0],ymm11[1,2,3,4],ymm0[5],ymm11[6,7],ymm0[8],ymm11[9,10,11,12],ymm0[13],ymm11[14,15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm3, %ymm11, %ymm0
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm5
-; AVX2-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1],ymm7[2],ymm5[3,4],ymm7[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, %ymm11
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm11[1],xmm0[2,3],xmm11[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm0, %ymm11, %ymm10, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm10
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm13[1],xmm10[2,3],xmm13[4],xmm10[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa %ymm14, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm8[0],ymm14[1],ymm8[2,3,4,5],ymm14[6],ymm8[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
+; AVX2-SLOW-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm14
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm4[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm15 = xmm14[0],xmm15[1],xmm14[2,3],xmm15[4],xmm14[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = mem[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm14[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4],ymm8[5],ymm7[6,7],ymm8[8],ymm7[9,10,11,12],ymm8[13],ymm7[14,15]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm15, %ymm7, %ymm6
-; AVX2-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm10[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm13[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3,4],ymm7[5],ymm8[6,7],ymm7[8],ymm8[9,10,11,12],ymm7[13],ymm8[14,15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm1, %ymm7, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm14[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm3, %xmm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb %ymm3, %ymm13, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm11, %xmm2
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vmovdqa %ymm6, %ymm10
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm12[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm11[1],xmm2[2,3],xmm11[4],xmm2[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6],ymm5[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm6[0,2,0,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
-; AVX2-SLOW-NEXT:    vpblendd $56, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0,1,2],mem[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0],ymm8[1],ymm4[2,3,4,5],ymm8[6],ymm4[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm6, %ymm4
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
+; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm9
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm1[2],ymm9[3,4],ymm1[5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm4
-; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm7
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2],xmm1[3],xmm7[4,5],xmm1[6],xmm7[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm12
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0,1,2],xmm4[3],xmm12[4,5],xmm4[6],xmm12[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm7
-; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm8
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm13[3],xmm8[4,5],xmm13[6],xmm8[7]
-; AVX2-SLOW-NEXT:    vmovdqa %ymm12, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm12[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm12[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm13[2],ymm11[3,4,5,6],ymm13[7],ymm11[8,9],ymm13[10],ymm11[11,12,13,14],ymm13[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm13 = ymm15[0],mem[1],ymm15[2,3],mem[4],ymm15[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm13, %xmm10
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm14[2],xmm10[3],xmm14[4,5],xmm10[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3,4,5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm11[3],xmm8[4,5],xmm11[6],xmm8[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $109, (%rsp), %ymm2, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm11, %xmm15
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0,1],xmm12[2],xmm15[3],xmm12[4,5],xmm15[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm15 = ymm14[0],mem[1],ymm14[2,3,4,5],mem[6],ymm14[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm15, %ymm13
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3,4,5],ymm12[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7],ymm10[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm12[0,1,2],ymm8[3,4,5,6,7],ymm12[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm10[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm11 = mem[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0,1],ymm8[2],ymm11[3,4,5,6],ymm8[7],ymm11[8,9],ymm8[10],ymm11[11,12,13,14],ymm8[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2],xmm3[3],xmm7[4,5],xmm3[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3,4,5],ymm3[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm2[0,1,2],ymm4[3,4,5,6,7],ymm2[8,9,10],ymm4[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm9[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm2[3,4,5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm13, %xmm11, %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm15, %ymm2
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm9[2],mem[3,4],ymm9[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1],ymm10[2],mem[3,4],ymm10[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm5[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm5[1,2],xmm12[3],xmm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3,4,5,6,7],ymm13[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm9[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm10[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3,4],ymm0[5,6],ymm3[7],ymm0[8],ymm3[9,10,11,12],ymm0[13,14],ymm3[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm13
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm5[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[0,0,0,0,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4],xmm5[5,6],xmm7[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm7[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0],xmm8[1,2],xmm13[3],xmm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm13[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm8[4],xmm5[5,6],xmm8[7]
+; AVX2-SLOW-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm8 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $219, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,0,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,0,0,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm13[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm14[4],xmm5[5,6],xmm14[7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm12[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm1[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0],ymm10[1,2,3,4],ymm14[5,6],ymm10[7],ymm14[8],ymm10[9,10,11,12],ymm14[13,14],ymm10[15]
-; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm15[0,1],mem[2],ymm15[3,4],mem[5],ymm15[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm14[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm0[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm15[0],xmm8[1,2],xmm15[3],xmm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0],xmm3[1,2],xmm12[3],xmm3[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1,2],ymm5[3,4,5,6,7],ymm10[8,9,10],ymm5[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm10[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm9, %ymm10
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1,2,3,4],ymm10[5,6],ymm4[7],ymm10[8],ymm4[9,10,11,12],ymm10[13,14],ymm4[15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5,6],xmm6[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0,1,2],ymm5[3,4,5,6,7],ymm12[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1,2],xmm4[3],xmm6[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX2-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5,6],xmm4[7]
-; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm12, %ymm4
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4],ymm4[5,6],ymm1[7],ymm4[8],ymm1[9,10,11,12],ymm4[13,14],ymm1[15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2],xmm0[3],xmm4[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm4
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7],ymm4[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[0,1,0,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm2, %xmm7
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm7
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm7[4],xmm4[5],xmm7[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
@@ -3635,20 +3499,20 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm8, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm8, %xmm3
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm7, 32(%rsi)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
@@ -3660,965 +3524,854 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm7, 32(%rcx)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm5, (%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 32(%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 32(%r8)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 32(%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm4, (%r9)
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 32(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, 32(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%rax)
-; AVX2-SLOW-NEXT:    addq $520, %rsp # imm = 0x208
+; AVX2-SLOW-NEXT:    addq $488, %rsp # imm = 0x1E8
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-LABEL: load_i16_stride6_vf32:
 ; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $552, %rsp # imm = 0x228
-; AVX2-FAST-NEXT:    vmovdqa 288(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX2-FAST-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[2,3],ymm3[2,3]
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm2[0,1],ymm3[0,1]
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm1[2,3],ymm0[2,3]
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
-; AVX2-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm8
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm10[1],xmm0[2,3],xmm10[4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa %ymm12, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm12[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm13[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1,2,3,4],ymm10[5],ymm4[6,7],ymm10[8],ymm4[9,10,11,12],ymm10[13],ymm4[14,15]
+; AVX2-FAST-NEXT:    subq $488, %rsp # imm = 0x1E8
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqa 224(%rdi), %ymm6
+; AVX2-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX2-FAST-NEXT:    vmovdqa 288(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm3[2,3],ymm2[2,3]
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm11[1],xmm0[2,3],xmm11[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm11, %ymm10, %ymm4
+; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 192(%rdi), %ymm10
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm10[2],ymm6[3,4],ymm10[5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm15
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm15[0],xmm6[1],xmm15[2,3],xmm6[4],xmm15[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm15, %ymm6, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm10
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm13[1],xmm10[2,3],xmm13[4],xmm10[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa %ymm14, %ymm5
+; AVX2-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm8[0],ymm14[1],ymm8[2,3,4,5],ymm14[6],ymm8[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm4
 ; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3],xmm8[4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm13, %ymm1
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm9, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10,11,12],ymm1[13],ymm2[14,15]
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm2
-; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm12, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm3, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4,5,6],ymm1[7],ymm6[8,9],ymm1[10],ymm6[11,12,13,14],ymm1[15]
-; AVX2-FAST-NEXT:    vpblendd $109, (%rsp), %ymm11, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm8 = mem[0],ymm11[1],mem[2,3],ymm11[4],mem[5,6],ymm11[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm12
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm12[2,1,0,3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm8, %xmm12
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm14, %xmm13
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm13[2],xmm12[3],xmm13[4,5],xmm12[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 160(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm12
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm12[3],xmm11[4,5],xmm12[6],xmm11[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm1[0,1,2],ymm11[3,4,5,6,7],ymm1[8,9,10],ymm11[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm0[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm1
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0,1],ymm1[2],ymm12[3,4,5,6],ymm1[7],ymm12[8,9],ymm1[10],ymm12[11,12,13,14],ymm1[15]
-; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm12 = ymm10[0],mem[1],ymm10[2,3],mem[4],ymm10[5,6],mem[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm10
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm10
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0,1],xmm10[2],xmm15[3],xmm10[4,5],xmm15[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1,2],ymm3[3,4,5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 352(%rdi), %ymm6
-; AVX2-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovdqa %ymm6, %ymm10
-; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, %ymm15
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm6
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm13[0,1,2],xmm6[3],xmm13[4,5],xmm6[6],xmm13[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm1
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm2
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm11, %xmm3
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-FAST-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm1
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm2
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[2,1,0,3]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm3
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0],ymm8[1],ymm5[2,3,4,5],ymm8[6],ymm5[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 320(%rdi), %ymm11
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm6
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm12
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm12[0,1,2],xmm6[3],xmm12[4,5],xmm6[6],xmm12[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 160(%rdi), %ymm6
+; AVX2-FAST-NEXT:    vmovdqa 128(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm9
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm9[3],xmm8[4,5],xmm9[6],xmm8[7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $146, (%rsp), %ymm3, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm9 = ymm3[0],mem[1],ymm3[2,3],mem[4],ymm3[5,6],mem[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm3
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm12
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm15[2],xmm3[3],xmm15[4,5],xmm3[6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = ymm14[0],mem[1],ymm14[2,3,4,5],mem[6],ymm14[7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm13[3,4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6,7],ymm3[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4,5,6],ymm11[7],ymm2[8,9],ymm11[10],ymm2[11,12,13,14],ymm11[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0,1],xmm4[2],xmm12[3],xmm4[4,5],xmm12[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3,4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm10
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm10[2],xmm3[3],xmm10[4,5],xmm3[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3,4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm1
+; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm2
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm9, %xmm0
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm2
-; AVX2-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm4
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm4
-; AVX2-FAST-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm5 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4,5,6],ymm5[7],ymm4[8,9],ymm5[10],ymm4[11,12,13,14],ymm5[15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm4 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm5
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3,4],ymm5[5,6],ymm6[7],ymm5[8],ymm6[9,10,11,12],ymm5[13,14],ymm6[15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm15[2],ymm10[3,4],ymm15[5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[2,1,0,3]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm6[0,1,2,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm6
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm9[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm12[4],xmm6[5,6],xmm12[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm12 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,2,3]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm12[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7],ymm5[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm11[2],mem[3,4],ymm11[5],mem[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,0,3]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,1,2,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm4
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm9[4],xmm4[5,6],xmm9[7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm13
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm9[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm15[0],xmm13[1,2],xmm15[3],xmm13[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3,4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm10 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3,4],ymm0[5,6],ymm3[7],ymm0[8],ymm3[9,10,11,12],ymm0[13,14],ymm3[15]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm15[2],mem[3,4],ymm15[5],mem[6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm7
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm11[4],xmm7[5,6],xmm11[7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, (%rsp), %ymm2, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm11 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm14[1,2],xmm6[3],xmm14[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm11[2],ymm6[3,4],ymm11[5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,1,0,3]
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm13, %xmm7
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,1]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm1[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5,6],xmm8[7]
+; AVX2-FAST-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm8 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $36, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm0[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0],xmm12[1,2],xmm15[3],xmm12[4,5,6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6],ymm4[7],ymm7[8],ymm4[9,10,11,12],ymm7[13,14],ymm4[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm8
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm13
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1,2],xmm12[3],xmm13[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
-; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm1[1,2,3,4],ymm6[5,6],ymm1[7],ymm6[8],ymm1[9,10,11,12],ymm6[13,14],ymm1[15]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5,6],xmm3[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1,2],xmm5[3],xmm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = ymm15[0],mem[1],ymm15[2,3],mem[4],ymm15[5,6],mem[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm6
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2,3],xmm6[4],xmm8[5],xmm6[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm8 = mem[0],ymm8[1],mem[2,3],ymm8[4],mem[5,6],ymm8[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm9, %xmm5
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5],xmm5[6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm8[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm15[0,1,2],ymm7[3,4,5,6,7],ymm15[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm15[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6],xmm2[7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm10, %xmm10
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1,2],xmm9[3],xmm10[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm3
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5,6],xmm1[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm3
+; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2],xmm0[3],xmm4[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0],ymm6[1],ymm11[2,3],ymm6[4],ymm11[5,6],ymm6[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm8 = ymm8[0],mem[1],ymm8[2,3],mem[4],ymm8[5,6],mem[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm9, %xmm4
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm6
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm3
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm3
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm6
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm7, 32(%rsi)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm7, (%rsi)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm7, 32(%rdx)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm7, (%rdx)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm7, 32(%rcx)
-; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%rcx)
-; AVX2-FAST-NEXT:    vmovdqa %ymm4, 32(%r8)
-; AVX2-FAST-NEXT:    vmovdqa %ymm1, (%r8)
-; AVX2-FAST-NEXT:    vmovdqa %ymm5, 32(%r9)
-; AVX2-FAST-NEXT:    vmovdqa %ymm6, (%r9)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm6, 32(%rsi)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm6, (%rsi)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm6, 32(%rdx)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm6, (%rdx)
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovaps %ymm6, 32(%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm7, (%rcx)
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, 32(%r8)
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-FAST-NEXT:    vmovdqa %ymm4, 32(%r9)
+; AVX2-FAST-NEXT:    vmovdqa %ymm5, (%r9)
 ; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-FAST-NEXT:    vmovdqa %ymm3, 32(%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rax)
-; AVX2-FAST-NEXT:    addq $552, %rsp # imm = 0x228
+; AVX2-FAST-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-FAST-NEXT:    addq $488, %rsp # imm = 0x1E8
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf32:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $552, %rsp # imm = 0x228
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm8, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 96(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[2,3],ymm3[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm2[0,1],ymm3[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm1[2,3],ymm0[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm10[1],xmm0[2,3],xmm10[4],xmm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm12, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm9 = ymm12[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm13[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1,2,3,4],ymm10[5],ymm4[6,7],ymm10[8],ymm4[9,10,11,12],ymm10[13],ymm4[14,15]
+; AVX2-FAST-PERLANE-NEXT:    subq $488, %rsp # imm = 0x1E8
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 96(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 224(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 192(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm3[2,3],ymm2[2,3]
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm9 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm0[1],ymm2[2,3,4,5],ymm0[6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm1, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm11[1],xmm0[2,3],xmm11[4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm11, %ymm10, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 192(%rdi), %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm10[2],ymm6[3,4],ymm10[5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm12, %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm15[0],xmm6[1],xmm15[2,3],xmm6[4],xmm15[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm15, %ymm6, %ymm4, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm11, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm13[1],xmm10[2,3],xmm13[4],xmm10[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm14, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm8[0],ymm14[1],ymm8[2,3,4,5],ymm14[6],ymm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3],xmm8[4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm13, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm9, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10,11,12],ymm1[13],ymm2[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm14, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm12, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm3, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4,5,6],ymm1[7],ymm6[8,9],ymm1[10],ymm6[11,12,13,14],ymm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, (%rsp), %ymm11, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm8 = mem[0],ymm11[1],mem[2,3],ymm11[4],mem[5,6],ymm11[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm8, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm14 = xmm12[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm8, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm14, %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm13[2],xmm12[3],xmm13[4,5],xmm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 160(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm5, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm12[3],xmm11[4,5],xmm12[6],xmm11[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm1[0,1,2],ymm11[3,4,5,6,7],ymm1[8,9,10],ymm11[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm11 = ymm0[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm11, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0,1],ymm1[2],ymm12[3,4,5,6],ymm1[7],ymm12[8,9],ymm1[10],ymm12[11,12,13,14],ymm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm12 = ymm10[0],mem[1],ymm10[2,3],mem[4],ymm10[5,6],mem[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm4, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0,1],xmm10[2],xmm15[3],xmm10[4,5],xmm15[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1,2],ymm3[3,4,5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 352(%rdi), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 320(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm0, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm13[0,1,2],xmm6[3],xmm13[4,5],xmm6[6],xmm13[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm13, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm12, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm11, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm0, %ymm2, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm4, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm4, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm10, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0],ymm8[1],ymm5[2,3,4,5],ymm8[6],ymm5[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 320(%rdi), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm2, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm5, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm12[0,1,2],xmm6[3],xmm12[4,5],xmm6[6],xmm12[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3,4,5,6,7],ymm1[8,9,10],ymm6[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 160(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 128(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm0, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm9[3],xmm8[4,5],xmm9[6],xmm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, (%rsp), %ymm3, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm9 = ymm3[0],mem[1],ymm3[2,3],mem[4],ymm3[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm9, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm9, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm15[2],xmm3[3],xmm15[4,5],xmm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm15 = ymm14[0],mem[1],ymm14[2,3,4,5],mem[6],ymm14[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm13 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm13[3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3,4,5,6,7],ymm3[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4,5,6],ymm11[7],ymm2[8,9],ymm11[10],ymm2[11,12,13,14],ymm11[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0,1],xmm4[2],xmm12[3],xmm4[4,5],xmm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3,4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm4, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm10, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm10[2],xmm3[3],xmm10[4,5],xmm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm9, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm15, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm9, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm8, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm14, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3],xmm4[4,5],xmm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4,5,6],ymm5[7],ymm4[8,9],ymm5[10],ymm4[11,12,13,14],ymm5[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm7, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm4, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1,2,3,4],ymm5[5,6],ymm6[7],ymm5[8],ymm6[9,10,11,12],ymm5[13,14],ymm6[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm15[2],ymm10[3,4],ymm15[5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm9 = xmm6[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm8, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm9[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm12[4],xmm6[5,6],xmm12[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm12 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm12[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2],xmm3[3],xmm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7],ymm5[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm11[2],mem[3,4],ymm11[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm9[4],xmm4[5,6],xmm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm10, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm9[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm15[0],xmm13[1,2],xmm15[3],xmm13[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3,4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm10 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm10, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3,4],ymm0[5,6],ymm3[7],ymm0[8],ymm3[9,10,11,12],ymm0[13,14],ymm3[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1],ymm15[2],mem[3,4],ymm15[5],mem[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm5, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm11[4],xmm7[5,6],xmm11[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, (%rsp), %ymm2, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm11 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm14[1,2],xmm6[3],xmm14[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm11[2],ymm6[3,4],ymm11[5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm13, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm1[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5,6],xmm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm8 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm0[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0],xmm12[1,2],xmm15[3],xmm12[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm7[0],ymm4[1,2,3,4],ymm7[5,6],ymm4[7],ymm7[8],ymm4[9,10,11,12],ymm7[13,14],ymm4[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm8, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm13, %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1,2],xmm12[3],xmm13[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5,6,7],ymm4[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm12[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm1[1,2,3,4],ymm6[5,6],ymm1[7],ymm6[8],ymm1[9,10,11,12],ymm6[13,14],ymm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5,6],xmm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1,2],xmm5[3],xmm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = ymm15[0],mem[1],ymm15[2,3],mem[4],ymm15[5,6],mem[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm3, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2,3],xmm6[4],xmm8[5],xmm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm8 = mem[0],ymm8[1],mem[2,3],ymm8[4],mem[5,6],ymm8[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm9, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5],xmm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm15 = ymm8[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm15[0,1,2],ymm7[3,4,5,6,7],ymm15[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm15[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6],xmm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm10, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1,2],xmm9[3],xmm10[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm13, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5,6],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm8, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2],xmm0[3],xmm4[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0],ymm6[1],ymm11[2,3],ymm6[4],ymm11[5,6],ymm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm3, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm1, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm8 = ymm8[0],mem[1],ymm8[2,3],mem[4],ymm8[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm9, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm8, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm9, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm8, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4],xmm1[5],xmm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm9, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm8, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, 32(%rsi)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, (%rsi)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, 32(%rdx)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, (%rdx)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, 32(%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 32(%r8)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, (%r8)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, 32(%r9)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, 32(%rsi)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, (%rsi)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, 32(%rdx)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, (%rdx)
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, 32(%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, (%rcx)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, 32(%r8)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%r8)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, 32(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, (%r9)
 ; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm3, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $552, %rsp # imm = 0x228
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    addq $488, %rsp # imm = 0x1E8
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride6_vf32:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
 ; AVX512F-ONLY-SLOW-NEXT:    pushq %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0],ymm0[1],ymm14[2,3],ymm0[4],ymm14[5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm5[2],ymm7[3,4],ymm5[5],ymm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1],ymm5[2],ymm14[3,4],ymm5[5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm2[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm9[3],xmm7[4,5],xmm9[6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm4[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm7[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm7, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm4[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm10[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,1,2,2,4,5,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm4[0,1],ymm0[2],ymm4[3,4,5,6],ymm0[7],ymm4[8,9],ymm0[10],ymm4[11,12,13,14],ymm0[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0],ymm4[1],ymm12[2,3],ymm4[4],ymm12[5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5,6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2],xmm2[3],xmm9[4,5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm7[3,4,5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} ymm22 = ymm2[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm22[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm7[2],ymm9[3,4,5,6],ymm7[7],ymm9[8,9],ymm7[10],ymm9[11,12,13,14],ymm7[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm13, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm7[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm15[3],xmm8[4,5],xmm15[6],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7],ymm9[8,9,10],ymm8[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2],xmm3[3],xmm6[4,5],xmm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3],xmm5[4,5],xmm1[6],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm11, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm10[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4,5,6],ymm1[7],ymm3[8,9],ymm1[10],ymm3[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm13, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0],ymm12[1],ymm9[2,3,4,5],ymm12[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3,4,5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm0[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm10[3],xmm8[4,5],xmm10[6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm8[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm8, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3],xmm7[4,5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3],xmm6[4,5],xmm2[6],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2],ymm14[3,4],ymm0[5],ymm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[0,0,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm14[2],ymm1[3,4],ymm14[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm22
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm19[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm5[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm10[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm20[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm22[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm15[2],ymm12[3],ymm15[4],ymm12[5,6],ymm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm16[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm13[4],xmm1[5,6],xmm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm0[2],ymm15[3],ymm0[4],ymm15[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm15, %zmm17, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm9[1,2],xmm1[3],xmm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm10[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm8, %ymm10, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1,2,3,4],ymm9[5,6],ymm7[7],ymm9[8],ymm7[9,10,11,12],ymm9[13,14],ymm7[15]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm11[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1,2],xmm2[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5,6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1,2],xmm5[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm7[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm8, %ymm13, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1,2,3,4],ymm6[5,6],ymm4[7],ymm6[8],ymm4[9,10,11,12],ymm6[13,14],ymm4[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm17, %zmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0],ymm4[1],ymm14[2,3],ymm4[4],ymm14[5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm10[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm6[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1,2,3,4],ymm9[5],ymm5[6,7],ymm9[8],ymm5[9,10,11,12],ymm9[13],ymm5[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm12[1],ymm0[2,3,4,5],ymm12[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm9[1],xmm0[2,3],xmm9[4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm24, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm9[2],ymm0[3,4],ymm9[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm9[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1],xmm5[2,3],xmm10[4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm10, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm25 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm13[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm22[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm5[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm14[0],ymm13[1,2,3,4],ymm14[5],ymm13[6,7],ymm14[8],ymm13[9,10,11,12],ymm14[13],ymm13[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm9[0],ymm13[1],ymm9[2,3],ymm13[4],ymm9[5,6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm13, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5],xmm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3,4],ymm8[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm7[4],xmm4[5],xmm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm30[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1,2,3,4],ymm6[5],ymm4[6,7],ymm6[8],ymm4[9,10,11,12],ymm6[13],ymm4[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm10, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm11[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm4, %ymm24, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm5[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1,2,3,4],ymm0[5],ymm4[6,7],ymm0[8],ymm4[9,10,11,12],ymm0[13],ymm4[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm4 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0],ymm5[1],ymm10[2,3],ymm5[4],ymm10[5,6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm10[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm12[4],xmm10[5],xmm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3,4,5],ymm15[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm12[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2,3,4],ymm10[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4],xmm3[5],xmm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm19, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm2, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm23, %zmm12 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, (%rdx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm30, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm2, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm19, %zmm13 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm25, %zmm17, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, (%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    popq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
@@ -4630,621 +4383,553 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm5, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm1[0,3,2,3,4,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,1,4,5,u,u,6,7,8,9,8,9,8,9,u,u,16,17,20,21,u,u,22,23,24,25,24,25,24,25,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm6, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} ymm22 = ymm5[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm22[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm1[0,1],ymm7[2],ymm1[3,4,5,6],ymm7[7],ymm1[8,9],ymm7[10],ymm1[11,12,13,14],ymm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm9, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0,1],ymm13[2],ymm15[3,4],ymm13[5],ymm15[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm6[1],ymm12[2,3],ymm6[4],ymm12[5,6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm11[2,3],mem[2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm11, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm2[1],ymm11[2,3,4,5],ymm2[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3,4,5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,8,9,u,u,u,u,u,u,u,u,4,5,u,u,u,u,24,25,u,u,u,u,u,u,u,u,20,21]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1],ymm0[2],ymm12[3,4],ymm0[5],ymm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0,1,2],xmm0[3],xmm14[4,5],xmm0[6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm2[2],ymm11[3,4],ymm2[5],ymm11[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2],xmm2[3],xmm10[4,5],xmm2[6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm2[2,3],mem[2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm9[2],xmm4[3],xmm9[4,5],xmm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3],xmm7[4,5],xmm3[6],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm13[2],xmm1[3],xmm13[4,5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2],xmm8[3],xmm10[4,5],xmm8[6],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,6,7,u,u,6,7,10,11,10,11,10,11,u,u,18,19,22,23,u,u,22,23,26,27,26,27,26,27,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3,4,5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11,12,13,14],ymm5[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3,4,5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,10,11,u,u,u,u,u,u,u,u,6,7,u,u,u,u,26,27,u,u,u,u,u,u,u,u,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm11[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm15[2],ymm13[3,4],ymm15[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6],ymm1[7],ymm0[8,9],ymm1[10],ymm0[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2],ymm14[3,4],ymm0[5],ymm14[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm13[2],ymm14[3],ymm13[4],ymm14[5,6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm22[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm6[1,2,3,4],ymm3[5,6],ymm6[7],ymm3[8],ymm6[9,10,11,12],ymm3[13,14],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm14[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm14[1,2],xmm11[3],xmm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4],xmm13[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm9[1,2,3,4],ymm13[5,6],ymm9[7],ymm13[8],ymm9[9,10,11,12],ymm13[13,14],ymm9[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm0[2],ymm9[3,4],ymm0[5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm14[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm12[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm14[4],xmm0[5,6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0,1,2],ymm0[3,4,5,6,7],ymm13[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm16[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm12[4],xmm1[5,6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm17, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm10[4],xmm5[5,6],xmm10[7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1,2],xmm2[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5,6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm11[0],ymm4[1,2,3,4],ymm11[5,6],ymm4[7],ymm11[8],ymm4[9,10,11,12],ymm11[13,14],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1,2],xmm6[3],xmm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4],ymm4[5,6],ymm1[7],ymm4[8],ymm1[9,10,11,12],ymm4[13,14],ymm1[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3],xmm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm6[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5,6],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0,1,2,3],xmm2[4],xmm7[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm22[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm1[1,2,3,4],ymm8[5],ymm1[6,7],ymm8[8],ymm1[9,10,11,12],ymm8[13],ymm1[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm14[1],xmm0[2,3],xmm14[4],xmm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm8, %ymm22, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm10[0,1,2,3],xmm3[4],xmm10[5],xmm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3],xmm11[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm23, %ymm15, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm19[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm15, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm25[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4],ymm8[5],ymm7[6,7],ymm8[8],ymm7[9,10,11,12],ymm8[13],ymm7[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm14[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0,1,2,3],xmm0[4],xmm14[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5],ymm9[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm9[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1,2,3,4],ymm7[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2,3],xmm10[4],xmm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm23, %ymm10, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2,3,4],ymm2[5],ymm13[6,7],ymm2[8],ymm13[9,10,11,12],ymm2[13],ymm13[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm1[1],xmm10[2,3],xmm1[4],xmm10[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm2, %ymm22, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0,1,2,3],xmm5[4],xmm9[5],xmm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1,2,3,4],ymm1[5],ymm4[6,7],ymm1[8],ymm4[9,10,11,12],ymm1[13],ymm4[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm11, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm21, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm28, %zmm4 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm23, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm22, %zmm4 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm24, %zmm17, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, (%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    addq $136, %rsp
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i16_stride6_vf32:
 ; AVX512DQ-SLOW:       # %bb.0:
+; AVX512DQ-SLOW-NEXT:    pushq %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm12
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm14[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm17
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0,1],ymm2[2],ymm6[3,4],ymm2[5],ymm6[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm16
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm11
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm2[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm8[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,2,4,5,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4,5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11,12,13,14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0],ymm0[1],ymm10[2,3],ymm0[4],ymm10[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm24
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2],xmm1[3],xmm5[4,5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} ymm18 = ymm1[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm1, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[0,3,2,3,4,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[3],xmm2[4,5],xmm8[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm16
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm24
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2],xmm1[3],xmm9[4,5],xmm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0],ymm12[1],ymm8[2,3,4,5],ymm12[6],ymm8[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm29
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm11[3],xmm6[4,5],xmm11[6],xmm6[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm6
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm6, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm26
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm5[2],ymm1[3,4,5,6],ymm5[7],ymm1[8,9],ymm5[10],ymm1[11,12,13,14],ymm5[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm13[3],xmm9[4,5],xmm13[6],xmm9[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm9, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm17, %zmm9
 ; AVX512DQ-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm10 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm9 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm13[2],xmm6[3],xmm13[4,5],xmm6[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2],xmm4[3],xmm8[4,5],xmm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm15[3],xmm7[4,5],xmm15[6],xmm7[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1],ymm7[2],ymm8[3,4,5,6],ymm7[7],ymm8[8,9],ymm7[10],ymm8[11,12,13,14],ymm7[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm9, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm17, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm7 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm0[2],ymm12[3,4],ymm0[5],ymm12[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm15[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm12[2],ymm1[3,4],ymm12[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[0,0,0,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6],xmm4[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm22[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm11[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2],ymm4[3,4],ymm1[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm27[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm1[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0],ymm6[1,2,3,4],ymm0[5,6],ymm6[7],ymm0[8],ymm6[9,10,11,12],ymm0[13,14],ymm6[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm6[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3],xmm10[4],xmm11[5,6],xmm10[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm9[0,1,2],ymm10[3,4,5,6,7],ymm9[8,9,10],ymm10[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm9, %zmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4],xmm0[5,6],xmm9[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm17, %zmm19
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1,2],xmm5[3],xmm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm8[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm10, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm8[0],ymm3[1,2,3,4],ymm8[5,6],ymm3[7],ymm8[8],ymm3[9,10,11,12],ymm8[13,14],ymm3[15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm14[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0],xmm4[1,2],xmm8[3],xmm4[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm17, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm15[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm10[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5,6],xmm7[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1,2],xmm4[3],xmm6[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm14, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1,2,3,4],ymm4[5,6],ymm1[7],ymm4[8],ymm1[9,10,11,12],ymm4[13,14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5,6],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm9, %zmm3
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2,3],ymm2[4],ymm12[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm8[4],xmm5[5],xmm8[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm22[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm8[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0],ymm5[1,2,3,4],ymm9[5],ymm5[6,7],ymm9[8],ymm5[9,10,11,12],ymm9[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm13[1],xmm3[2,3],xmm13[4],xmm3[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm20, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm9[2],ymm4[3,4],ymm9[5],ymm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3],xmm9[4],xmm7[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm10, %ymm9, %ymm7
 ; AVX512DQ-SLOW-NEXT:    movw $31, %ax
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm3, %zmm0, %zmm21 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm27[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm18[0,1,0,3,4,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm12[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm13[0],ymm5[1,2,3,4],ymm13[5],ymm5[6,7],ymm13[8],ymm5[9,10,11,12],ymm13[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm13[1],ymm5[2,3],ymm13[4],ymm5[5,6],ymm13[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm13, %xmm11
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm5[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm7, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm7[0,1,0,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm14[0,1,2,3],xmm11[4],xmm14[5],xmm11[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm15[0,1,2,3,4],ymm11[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm14[0,1,2,3],xmm8[4],xmm14[5],xmm8[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2,3,4,5],ymm11[6],ymm14[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm11[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4],ymm8[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2,3],xmm5[4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm28[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm8[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1,2,3,4],ymm6[5],ymm2[6,7],ymm6[8],ymm2[9,10,11,12],ymm6[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm10[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm20, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm6, %zmm0, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm12[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10,11,12],ymm2[13],ymm3[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[0,1,1,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm10, %ymm1, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm2, %zmm0, %zmm0 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm9, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5],xmm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, (%rdx)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, (%rdx)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm17, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, (%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, (%r9)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512DQ-SLOW-NEXT:    popq %rax
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
@@ -5255,302 +4940,265 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[2,1,0,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0,1],ymm4[2],ymm6[3,4],ymm4[5],ymm6[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm24
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm15, %xmm4
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1],ymm13[2],ymm4[3,4],ymm13[5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm10, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm16
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,1,4,5,u,u,6,7,8,9,8,9,8,9,u,u,16,17,20,21,u,u,22,23,24,25,24,25,24,25,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm8, %ymm1
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} ymm18 = ymm3[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm18[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,8,9,u,u,u,u,u,u,u,u,4,5,u,u,u,u,24,25,u,u,u,u,u,u,u,u,20,21]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm9[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm9, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm1[1],ymm12[2,3,4,5],ymm1[6],ymm12[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm13
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm27
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3],xmm12[4,5],xmm0[6],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm1, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3],xmm8[4,5],xmm2[6],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm17, %zmm9
 ; AVX512DQ-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm10 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm9 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm14[2],xmm5[3],xmm14[4,5],xmm5[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm15[3],xmm11[4,5],xmm15[6],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm11, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <2,3,6,7,u,u,6,7,10,11,10,11,10,11,u,u,18,19,22,23,u,u,22,23,26,27,26,27,26,27,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3,4,5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11,12,13,14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2],ymm5[3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,10,11,u,u,u,u,u,u,u,u,6,7,u,u,u,u,26,27,u,u,u,u,u,u,u,u,22,23]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4,5,6],ymm3[7],ymm0[8,9],ymm3[10],ymm0[11,12,13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm9, %xmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm1, %zmm4
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm30
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm15[3],xmm10[4,5],xmm15[6],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm10, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm29
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm8[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm30
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2],xmm0[3],xmm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm9
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm12[0,1],ymm9[2],ymm12[3],ymm9[4],ymm12[5,6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm14[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5,6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm18[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm4[0],ymm11[1,2,3,4],ymm4[5,6],ymm11[7],ymm4[8],ymm11[9,10,11,12],ymm4[13,14],ymm11[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm0[2],ymm4[3,4],ymm0[5],ymm4[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm4[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm12[1,2],xmm9[3],xmm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm11[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm11
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0],ymm7[1,2,3,4],ymm11[5,6],ymm7[7],ymm11[8],ymm7[9,10,11,12],ymm11[13,14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm0[2],ymm7[3,4],ymm0[5],ymm7[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm12[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm12[4],xmm0[5,6],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm19
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm7, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm17[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm6[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm17, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm5[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm17, %zmm18
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[3,1,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1,2],xmm10[3],xmm8[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm12
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm12, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm11[0],ymm5[1,2,3,4],ymm11[5,6],ymm5[7],ymm11[8],ymm5[9,10,11,12],ymm11[13,14],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm4[1,2],xmm11[3],xmm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm22
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5,6],ymm1[7],ymm3[8],ymm1[9,10,11,12],ymm3[13,14],ymm1[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5,6],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm7, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm14[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1,2],ymm4[3,4,5,6,7],ymm1[8,9,10],ymm4[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm20
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm18[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3,4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10,11,12],ymm6[13],ymm5[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm6
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm0[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm12[1],xmm6[2,3],xmm12[4],xmm6[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm18, %ymm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm12[1],ymm9[2,3,4,5],ymm12[6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm13[2],ymm8[3,4],ymm13[5],ymm8[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2,3],xmm11[4],xmm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm19, %ymm13, %ymm4
 ; AVX512DQ-FAST-NEXT:    movw $31, %ax
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm6, %zmm0, %zmm20 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm22[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3,4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10,11,12],ymm6[13],ymm5[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0],ymm6[1],ymm1[2,3],ymm6[4],ymm1[5,6],ymm6[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0,1,2,3],xmm9[4],xmm14[5],xmm9[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm9[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm11, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm11[1,2,3,4],ymm2[5],ymm11[6,7],ymm2[8],ymm11[9,10,11,12],ymm2[13],ymm11[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3],xmm0[4],xmm8[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm2, %ymm18, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1,2,3,4],ymm0[5],ymm3[6,7],ymm0[8],ymm3[9,10,11,12],ymm0[13],ymm3[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2,3,4,5],ymm7[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm7[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm19, %ymm8, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2,3],xmm8[4],xmm3[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm7[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rdx)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm17, %zmm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, (%rcx)
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm13
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-FAST-NEXT:    popq %rax
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
@@ -7823,377 +7471,304 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-LABEL: load_i16_stride6_vf64:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    subq $1272, %rsp # imm = 0x4F8
-; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
+; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa 672(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm3
+; AVX2-SLOW-NEXT:    vmovaps 672(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vmovaps 640(%rdi), %ymm3
 ; AVX2-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm4
 ; AVX2-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqa 480(%rdi), %ymm6
 ; AVX2-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm7
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm7[2,3],ymm6[2,3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[0,1],ymm6[0,1]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm7[2,3],ymm6[2,3]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm7[0,1],ymm6[0,1]
 ; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm5[2,3],ymm4[2,3]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm5[0,1],ymm4[0,1]
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm3[0,1],ymm2[0,1]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm14 = ymm5[0,1],ymm4[0,1]
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
+; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm4[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm3[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm10[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3,4],ymm3[5],ymm2[6,7],ymm3[8],ymm2[9,10,11,12],ymm3[13],ymm2[14,15]
+; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm14[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10,11,12],ymm4[13],ymm2[14,15]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm4, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0],ymm15[1],ymm13[2,3,4,5],ymm15[6],ymm13[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0],ymm2[1,2,3,4],ymm9[5],ymm2[6,7],ymm9[8],ymm2[9,10,11,12],ymm9[13],ymm2[14,15]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm9
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm11[0,1,0,3,4,5,4,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm15 = ymm1[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0],ymm0[1,2,3,4],ymm15[5],ymm0[6,7],ymm15[8],ymm0[9,10,11,12],ymm15[13],ymm0[14,15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm2, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10,11,12],ymm0[13],ymm2[14,15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm2
-; AVX2-SLOW-NEXT:    vpshufd $229, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm8 = mem[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3],xmm8[4],xmm2[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm2, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm14[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10,11,12],ymm0[13],ymm2[14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
-; AVX2-SLOW-NEXT:    vpshufd $229, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm5 = mem[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm13, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm1
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10,11,12],ymm0[13],ymm2[14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm6, %xmm2
-; AVX2-SLOW-NEXT:    vpshufd $229, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm5 = mem[1,1,2,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm1
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm1[0],xmm12[1],xmm1[2,3],xmm12[4],xmm1[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = ymm8[0],mem[1],ymm8[2,3,4,5],mem[6],ymm8[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm12, %ymm11, %ymm11
+; AVX2-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm2[0],xmm9[1],xmm2[2,3],xmm9[4],xmm2[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm10
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm9, %ymm10, %ymm9
+; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, %ymm9
+; AVX2-SLOW-NEXT:    vpblendvb %ymm3, %ymm4, %ymm6, %ymm4
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm13, %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[1,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm15, %ymm4
+; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm3, %ymm4, %ymm3
+; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[1,1,2,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendvb %ymm15, %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm10[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm6, %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm9, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, %ymm5
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm2
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd $104, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = mem[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm13[0,3,2,3,4,7,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm4 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = ymm14[0],mem[1],ymm14[2,3,4,5],mem[6],ymm14[7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm5, %ymm10
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
-; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm11[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm4 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm1
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3],xmm1[4,5],xmm5[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm1
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm5[0,1,2,2]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm13
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm13[0,1,2],xmm5[3],xmm13[4,5],xmm5[6],xmm13[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm15
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3],xmm15[4,5],xmm14[6],xmm15[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm0[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm13
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm14[2],xmm13[3],xmm14[4,5],xmm13[6,7]
+; AVX2-SLOW-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0],ymm8[1],mem[2,3,4,5],ymm8[6],mem[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa %ymm10, %ymm14
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm8
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm8, %xmm10
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm11[3],xmm10[4,5],xmm11[6],xmm10[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm10[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm11
+; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
 ; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm7
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm2[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm7[0,1,2],xmm13[3],xmm7[4,5],xmm13[6],xmm7[7]
-; AVX2-SLOW-NEXT:    vpshufd $104, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,2,2,1,4,6,6,5]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-SLOW-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm7[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0,1],ymm0[2],ymm14[3,4,5,6],ymm0[7],ymm14[8,9],ymm0[10],ymm14[11,12,13,14],ymm0[15]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm10[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm12[3],xmm7[4,5],xmm12[6],xmm7[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm14, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm5[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm15[2],xmm0[3],xmm15[4,5],xmm0[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm12 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm12, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm9 = mem[0],ymm9[1],mem[2,3,4,5],ymm9[6],mem[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm15
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm10[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm13
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm13[2],xmm9[3],xmm13[4,5],xmm9[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm13
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0,1,2],xmm8[3],xmm13[4,5],xmm8[6],xmm13[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm8 = mem[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4,5,6],ymm0[7],ymm8[8,9],ymm0[10],ymm8[11,12,13,14],ymm0[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
 ; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm8[2],xmm6[3],xmm8[4,5],xmm6[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm12[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm8[3],xmm6[4,5],xmm8[6],xmm6[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm8[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = mem[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4,5,6],ymm0[7],ymm6[8,9],ymm0[10],ymm6[11,12,13,14],ymm0[15]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm0, %ymm14
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm14[3,4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm11, %xmm0
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm3
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm14, %xmm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm1
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm2
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[2,1,2,1,6,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm7[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm9, %ymm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = mem[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
@@ -8205,903 +7780,811 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2],xmm6[3],xmm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, %ymm8
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,2,3]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm12[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7],ymm3[8,9,10],ymm5[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3,4],xmm3[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm8[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm0[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm3[0],ymm5[1,2,3,4],ymm3[5,6],ymm5[7],ymm3[8],ymm5[9,10,11,12],ymm3[13,14],ymm5[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,1,0,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[0,1,2,1]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[0,0,0,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0,1,2,3],xmm5[4],xmm3[5,6],xmm5[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm1[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1,2],xmm11[3],xmm10[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm8[0,1,2],ymm9[3,4,5,6,7],ymm8[8,9,10],ymm9[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,3,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm3[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1,2],xmm12[3],xmm10[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm2, %ymm12
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm13[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm11[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm9[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1,2,3,4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9,10,11,12],ymm8[13,14],ymm9[15]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm10[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm9[2,1,0,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm5[0,0,0,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5,6],xmm10[7]
+; AVX2-SLOW-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm2[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm10[1,2],xmm13[3],xmm10[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm8[0,1,2],ymm9[3,4,5,6,7],ymm8[8,9,10],ymm9[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3,4],xmm8[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm12[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm14[4],xmm13[5,6],xmm14[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm8
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,0,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm9[0,0,0,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,4,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm8[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0,1,2,3],xmm13[4],xmm10[5,6],xmm13[7]
-; AVX2-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm10 = mem[1,1,0,3,5,5,4,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm10[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm14[0,3,2,3,4,7,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm10[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0],ymm14[1,2,3,4],ymm13[5,6],ymm14[7],ymm13[8],ymm14[9,10,11,12],ymm13[13,14],ymm14[15]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm6[0,1],mem[2],ymm6[3,4],mem[5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm14[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm14[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm13[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm11[1,2],xmm6[3],xmm11[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm11
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm13
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm11[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = ymm15[0,1],mem[2],ymm15[3,4],mem[5],ymm15[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm11[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm6[1,2,3,4],ymm0[5,6],ymm6[7],ymm0[8],ymm6[9,10,11,12],ymm0[13,14],ymm6[15]
-; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm6 = mem[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = mem[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5,6],xmm6[7]
-; AVX2-SLOW-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm7 = mem[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm12[1,2],xmm7[3],xmm12[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm15, %ymm0
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm11[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm6[1,2,3,4],ymm0[5,6],ymm6[7],ymm0[8],ymm6[9,10,11,12],ymm0[13,14],ymm6[15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9,10,11,12],ymm0[13,14],ymm2[15]
-; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm2 = mem[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1,2],xmm8[3],xmm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm8
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm11[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7],ymm9[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm5 = mem[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm8 = mem[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1,2],xmm7[3],xmm4[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7],ymm7[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm7[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2],xmm1[3],xmm7[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7],ymm3[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm1 = mem[0,1,2,3,7,5,6,5]
 ; AVX2-SLOW-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # xmm3 = mem[1,1,1,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5,6],xmm1[7]
 ; AVX2-SLOW-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # xmm3 = mem[3,1,2,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # xmm5 = mem[0,1,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[1,1,1,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm10[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3,4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9,10,11,12],ymm1[13,14],ymm3[15]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm13[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm14[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,0,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm5
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm11, %ymm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7],ymm5[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0],ymm15[1],mem[2,3],ymm15[4],mem[5,6],ymm15[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm7
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4],xmm2[5],xmm7[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = ymm7[0],mem[1],ymm7[2,3],mem[4],ymm7[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm7[0,1,0,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm10
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1,2,3,4],ymm9[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm10 = ymm10[0],mem[1],ymm10[2,3],mem[4],ymm10[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm8
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5],xmm8[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm8 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm8 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm11
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5],xmm11[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1,2,3,4],ymm10[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm11[0,1,0,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm11, %xmm14
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm14
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm14[4],xmm13[5],xmm14[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm13 = mem[0,1,2,3,4],ymm13[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm12[0],mem[1],ymm12[2,3],mem[4],ymm12[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm15, %xmm6
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm14[0,1,0,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm12[0,1,2,3],xmm6[4],xmm12[5],xmm6[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm5
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5],xmm5[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5],xmm5[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm7
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm10[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm15, %xmm8
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3],xmm8[4],xmm10[5],xmm8[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm8
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5],xmm8[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm8 = mem[0,1,2,3,4],ymm8[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 96(%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 32(%r8)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, 64(%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 96(%r9)
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm15, %xmm6
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1,2,3],xmm6[4],xmm9[5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 32(%r8)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm1, 64(%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 96(%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm13, 32(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm9, 64(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm10, 64(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%r9)
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 32(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 96(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 32(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm5, 64(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX2-SLOW-NEXT:    addq $1272, %rsp # imm = 0x4F8
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-LABEL: load_i16_stride6_vf64:
 ; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $1336, %rsp # imm = 0x538
-; AVX2-FAST-NEXT:    vmovdqa 672(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovdqa 640(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovdqa 288(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovdqa 480(%rdi), %ymm4
-; AVX2-FAST-NEXT:    vmovdqa 448(%rdi), %ymm5
-; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX2-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    subq $1272, %rsp # imm = 0x4F8
+; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX2-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm9
 ; AVX2-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm6
-; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm6[2,3],ymm7[2,3]
-; AVX2-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm6[0,1],ymm7[0,1]
-; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm5[2,3],ymm4[2,3]
-; AVX2-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm5[0,1],ymm4[0,1]
+; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 672(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovaps 640(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovdqa 288(%rdi), %ymm4
+; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovdqa 480(%rdi), %ymm6
+; AVX2-FAST-NEXT:    vmovdqa 448(%rdi), %ymm7
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm7[2,3],ymm6[2,3]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm3[2,3],ymm2[2,3]
-; AVX2-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[0,1],ymm6[0,1]
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm5[2,3],ymm4[2,3]
+; AVX2-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm5[0,1],ymm4[0,1]
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
+; AVX2-FAST-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
+; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm1[0,1]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm1[2,3],ymm0[2,3]
-; AVX2-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm2
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vmovdqa %ymm3, %ymm6
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 416(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 384(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[0,1,0,3,4,5,4,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0],ymm6[1],ymm13[2,3,4,5],ymm6[6],ymm13[7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vmovdqa 416(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 384(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm2
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vmovdqa %ymm6, %ymm9
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm8[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm4[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm14[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vmovdqa 224(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm3
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm13
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm9, %ymm3, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vmovdqa %ymm9, %ymm8
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 224(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,0,3,4,5,4,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm5[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
-; AVX2-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm3
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm15[1],ymm12[2,3,4,5],ymm15[6],ymm12[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm1
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa 608(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm3
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[2,2,2,2,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm0
+; AVX2-FAST-NEXT:    vmovdqa 576(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm3[1,2,3,4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10,11,12],ymm2[13],ymm3[14,15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3],xmm0[4],xmm10[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm14[2,2,2,2,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm1[0],xmm12[1],xmm1[2,3],xmm12[4],xmm1[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm12, %ymm11, %ymm11
+; AVX2-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm12
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm12[1],xmm2[2,3],xmm12[4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm10, %ymm10
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm2, %ymm10, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm2
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu (%rsp), %ymm4 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm4
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm2
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm3
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm3
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm14, %xmm2
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0],xmm1[1],xmm7[2,3],xmm1[4],xmm7[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm8, %ymm14, %ymm0
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm1
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm0
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm1
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4,5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11,12,13,14],ymm0[15]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
+; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm0
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
+; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa %xmm2, %xmm11
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm5
-; AVX2-FAST-NEXT:    vmovdqa %xmm1, %xmm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = ymm6[0],mem[1],ymm6[2,3,4,5],mem[6],ymm6[7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, %ymm4
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqa 544(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm6
-; AVX2-FAST-NEXT:    vmovdqa %xmm2, %xmm15
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm6
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1],ymm0[2],ymm7[3,4,5,6],ymm0[7],ymm7[8,9],ymm0[10],ymm7[11,12,13,14],ymm0[15]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa %xmm8, %xmm5
-; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm0
-; AVX2-FAST-NEXT:    vmovdqa %xmm3, %xmm4
-; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm11
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2],xmm0[3],xmm11[4,5],xmm0[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 544(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm11
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm1
+; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm0
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm14
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm14[0,1,2],xmm11[3],xmm14[4,5],xmm11[6],xmm14[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa %xmm8, %xmm6
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
+; AVX2-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm14
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
+; AVX2-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm1
+; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm15
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3],xmm15[4,5],xmm0[6],xmm15[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm11
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm8 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm14
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1],ymm11[2],ymm14[3,4,5,6],ymm11[7],ymm14[8,9],ymm11[10],ymm14[11,12,13,14],ymm11[15]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm14
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm14
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm13
+; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm14
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[2,1,0,3]
+; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa %xmm11, %xmm15
+; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm14
+; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm13
+; AVX2-FAST-NEXT:    vmovdqa %xmm6, %xmm8
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1],xmm13[2],xmm14[3],xmm13[4,5],xmm14[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5],ymm13[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm12
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5],ymm13[6,7]
+; AVX2-FAST-NEXT:    vmovdqa 736(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm13, %xmm14
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm5
-; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm15
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3],xmm15[4,5],xmm14[6],xmm15[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm11[0,1,2],ymm14[3,4,5,6,7],ymm11[8,9,10],ymm14[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm14[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm0
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm6
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm11
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2],xmm0[3],xmm11[4,5],xmm0[6],xmm11[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm11 = mem[2,1,2,1,6,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm11, %ymm14
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm15 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vpshufb %ymm10, %ymm15, %ymm12
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm12[0,1],ymm14[2],ymm12[3,4,5,6],ymm14[7],ymm12[8,9],ymm14[10],ymm12[11,12,13,14],ymm14[15]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm12 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm12[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm10
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm9
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovdqa 736(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa 704(%rdi), %ymm4
-; AVX2-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm4[0,1],ymm1[2],ymm4[3,4],ymm1[5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm9[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm4
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0,1,2],xmm1[3],xmm14[4,5],xmm1[6],xmm14[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm11
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm10
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm10 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm1
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3],xmm8[4,5],xmm1[6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm8 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm15
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm15
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm15[2],xmm7[3],xmm15[4,5],xmm7[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm14
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm14[3,4,5],ymm7[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX2-FAST-NEXT:    # xmm1 = mem[0,1,2,3,5,5,5,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5,6,7],ymm7[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm14[2],ymm1[3,4,5,6],ymm14[7],ymm1[8,9],ymm14[10],ymm1[11,12,13,14],ymm14[15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm14
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm14[2],xmm7[3],xmm14[4,5],xmm7[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3,4,5],ymm7[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm14, %xmm14
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0,1,2],xmm3[3],xmm14[4,5],xmm3[6],xmm14[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm1
-; AVX2-FAST-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm2
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
-; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm2
-; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm2
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm0
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufhw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX2-FAST-NEXT:    # xmm2 = mem[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm2
-; AVX2-FAST-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7],ymm2[8,9],ymm3[10],ymm2[11,12,13,14],ymm3[15]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm1
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,5,5,5,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX2-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,5,5,5,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm1
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm2
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $219, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
-; AVX2-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm0
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
+; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm5
+; AVX2-FAST-NEXT:    vmovdqa %xmm0, %xmm10
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm4[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2],xmm6[3],xmm5[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX2-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm6
+; AVX2-FAST-NEXT:    vmovdqa %ymm0, %ymm9
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3,4,5,6,7],ymm6[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
+; AVX2-FAST-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,1,0,3]
 ; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,1,2,1]
-; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm2
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm15 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
+; AVX2-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm3[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm2[0,1,2,3],xmm12[4],xmm2[5,6],xmm12[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm2
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[0,3,2,1]
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm13
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[0,3,2,1]
 ; AVX2-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,1,2,3]
-; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0],xmm0[1,2],xmm13[3],xmm0[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm13
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5,6,7],ymm14[8,9,10],ymm13[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm14[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm13
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1,2],xmm14[3],xmm13[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm14
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1,2],ymm12[3,4,5,6,7],ymm14[8,9,10],ymm12[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm14[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm12[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm13
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm14
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm14[1,2,3,4],ymm13[5,6],ymm14[7],ymm13[8],ymm14[9,10,11,12],ymm13[13,14],ymm14[15]
+; AVX2-FAST-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[2,1,0,3]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm15, %xmm0
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm14[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm9[4],xmm0[5,6],xmm9[7]
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm13 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,0,3]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm13[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm8
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm0
-; AVX2-FAST-NEXT:    vmovdqa %xmm7, %xmm2
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1,2],xmm12[3],xmm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm11 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm6
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
+; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm0
+; AVX2-FAST-NEXT:    vmovdqa %xmm10, %xmm3
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1,2],xmm5[3],xmm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3,4,5,6,7],ymm13[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm5
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm1[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm10
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,1,2,1]
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5,6],xmm11[7]
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm7
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm10
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1,2,3,4],ymm0[5,6],ymm10[7],ymm0[8],ymm10[9,10,11,12],ymm0[13,14],ymm10[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX2-FAST-NEXT:    # xmm5 = mem[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm5[4],xmm1[5,6],xmm5[7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX2-FAST-NEXT:    # xmm13 = mem[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0],xmm1[1,2],xmm13[3],xmm1[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,1,0,3]
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm1
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,1,2,1]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,6,5,6,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9,10,11,12],ymm0[13,14],ymm2[15]
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm15, %xmm2
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm14[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm13[4],xmm2[5,6],xmm13[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm8
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
+; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm1
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm3[2,1,2,0,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1,2],xmm9[3],xmm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm5[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0,1,2],ymm2[3,4,5,6,7],ymm9[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,5,4]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm9[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm8[1,2,3,4],ymm2[5,6],ymm8[7],ymm2[8],ymm8[9,10,11,12],ymm2[13,14],ymm8[15]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
 ; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm8
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
 ; AVX2-FAST-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX2-FAST-NEXT:    # xmm9 = mem[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
-; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm9
-; AVX2-FAST-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX2-FAST-NEXT:    # xmm13 = mem[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm13[0],xmm9[1,2],xmm13[3],xmm9[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm2[0,1,2],ymm8[3,4,5,6,7],ymm2[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm9[4],xmm1[5,6],xmm9[7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1,2],xmm4[3],xmm7[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm4
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1,2,3,4],ymm2[5,6],ymm4[7],ymm2[8],ymm4[9,10,11,12],ymm2[13,14],ymm4[15]
-; AVX2-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm4
-; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm7, %xmm4
-; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm1
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm15, %ymm6
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX2-FAST-NEXT:    # xmm6 = mem[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6],xmm6[7]
+; AVX2-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm6
+; AVX2-FAST-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX2-FAST-NEXT:    # xmm11 = mem[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm11[0],xmm6[1,2],xmm11[3],xmm6[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm11
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm11[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm2, %xmm12, %xmm0
+; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5,6],xmm2[7]
+; AVX2-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm2
+; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm5
+; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,3,2,1]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm9
+; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,1]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm3
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm9
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3],xmm3[4],xmm9[5],xmm3[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpblendd $146, (%rsp), %ymm0, %ymm9 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm9 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm11
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm12
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm11
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm9, %xmm12
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4],xmm12[5],xmm11[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
@@ -9111,8 +8594,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    # ymm12 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm13, %xmm14
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm15
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm13, %xmm14
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm15
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5],xmm14[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
@@ -9121,34 +8604,34 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm15 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm0
-; AVX2-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm7
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm15, %xmm7
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm8
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5],xmm4[6,7]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm5
+; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6,7]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm10, %xmm5
 ; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0,1,2,3],xmm5[4],xmm9[5],xmm5[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm13, %xmm9
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm9
 ; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm10
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4],xmm10[5],xmm9[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-FAST-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm9 = mem[0,1,2,3,4],ymm9[5,6,7]
-; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm1
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -9178,13 +8661,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 64(%rcx)
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, (%rcx)
-; AVX2-FAST-NEXT:    vmovdqa %ymm8, 96(%r8)
+; AVX2-FAST-NEXT:    vmovdqa %ymm6, 96(%r8)
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 32(%r8)
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 64(%r8)
-; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%r8)
-; AVX2-FAST-NEXT:    vmovdqa %ymm6, 96(%r9)
+; AVX2-FAST-NEXT:    vmovdqa %ymm4, (%r8)
+; AVX2-FAST-NEXT:    vmovdqa %ymm7, 96(%r9)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm14, 32(%r9)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm11, 64(%r9)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm3, (%r9)
@@ -9192,592 +8675,516 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa %ymm0, 96(%rax)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm9, 32(%rax)
 ; AVX2-FAST-NEXT:    vmovdqa %ymm5, 64(%rax)
-; AVX2-FAST-NEXT:    vmovdqa %ymm4, (%rax)
-; AVX2-FAST-NEXT:    addq $1336, %rsp # imm = 0x538
+; AVX2-FAST-NEXT:    vmovdqa %ymm2, (%rax)
+; AVX2-FAST-NEXT:    addq $1272, %rsp # imm = 0x4F8
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: load_i16_stride6_vf64:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $1336, %rsp # imm = 0x538
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 672(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 640(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 480(%rdi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 448(%rdi), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    subq $1272, %rsp # imm = 0x4F8
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %ymm9
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 64(%rdi), %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 96(%rdi), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm6[2,3],ymm7[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm6[0,1],ymm7[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm5[2,3],ymm4[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm5[0,1],ymm4[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 672(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 640(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 480(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 448(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm7[2,3],ymm6[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm3[2,3],ymm2[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[0,1],ymm6[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm5[2,3],ymm4[2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm5[0,1],ymm4[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm2[2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm1[0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm1[2,3],ymm0[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm10, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm10, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm3, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm11 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0],ymm4[1],ymm3[2,3,4,5],ymm4[6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm10, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 416(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 384(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[0,1,0,3,4,5,4,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0],ymm6[1],ymm13[2,3,4,5],ymm6[6],ymm13[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm13[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 416(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 384(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm7, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm7, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm15 = ymm8[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm15, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm14 = ymm4[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm14[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 224(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 192(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm6, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm6, %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm9, %ymm3, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 224(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,0,3,4,5,4,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm9 = ymm5[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 608(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm13, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm15[1],ymm12[2,3,4,5],ymm15[6],ymm12[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm5, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 576(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 608(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm5, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[2,2,2,2,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm8, %ymm3, %ymm2, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 576(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm8 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm3[1,2,3,4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10,11,12],ymm2[13],ymm3[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3],xmm0[4],xmm10[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm14[2,2,2,2,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm1[0],xmm12[1],xmm1[2,3],xmm12[4],xmm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0],ymm1[1],mem[2,3,4,5],ymm1[6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm12, %ymm11, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm9, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm12[1],xmm2[2,3],xmm12[4],xmm2[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm12 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm12, %ymm10, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm2, %ymm10, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm8, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3],xmm2[4],xmm4[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm4 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm12, %ymm4, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm2, %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm3, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm13, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm12, %ymm5, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm2, %ymm3, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm14, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm7, %ymm0, %ymm1, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm7, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0],xmm1[1],xmm7[2,3],xmm1[4],xmm7[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm14, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm15, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm13, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm8, %ymm9, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm11, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10,11,12],ymm0[13],ymm1[14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm12, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm10, %ymm1, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm1, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4,5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11,12,13,14],ymm0[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm7, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm7, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm2, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 160(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm6[0],mem[1],ymm6[2,3,4,5],mem[6],ymm6[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 544(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm1, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm1, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0,1,2],ymm5[3,4,5,6,7],ymm0[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm1, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1],ymm0[2],ymm7[3,4,5,6],ymm0[7],ymm7[8,9],ymm0[10],ymm7[11,12,13,14],ymm0[15]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm7, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm8, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm7, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm2, %xmm11
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2],xmm0[3],xmm11[4,5],xmm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 544(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 512(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm3, %xmm11
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm5, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm1, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm11 = xmm14[0,1,2],xmm11[3],xmm14[4,5],xmm11[6],xmm14[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm5, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm8, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm15[0],mem[1],ymm15[2,3,4,5],mem[6],ymm15[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm1, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,1,2,1,6,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm4, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm4, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3],xmm15[4,5],xmm0[6],xmm15[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3,4,5,6,7],ymm14[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm0, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm8 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm8, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1],ymm11[2],ymm14[3,4,5,6],ymm11[7],ymm14[8,9],ymm11[10],ymm14[11,12,13,14],ymm11[15]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm2, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm6, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm14
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm11, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm11, %xmm3, %xmm14
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm0, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm6, %xmm8
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1],xmm13[2],xmm14[3],xmm13[4,5],xmm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5],ymm13[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm2, %ymm0, %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 736(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm13, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm13, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm5, %xmm15
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3],xmm15[4,5],xmm14[6],xmm15[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm14 = ymm11[0,1,2],ymm14[3,4,5,6,7],ymm11[8,9,10],ymm14[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm14[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm13, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm13, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm6, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2],xmm0[3],xmm11[4,5],xmm0[6],xmm11[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $102, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm11 = mem[2,1,2,1,6,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm11, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm15 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm10, %ymm15, %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm14 = ymm12[0,1],ymm14[2],ymm12[3,4,5,6],ymm14[7],ymm12[8,9],ymm14[10],ymm12[11,12,13,14],ymm14[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm12 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm12[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm4, %xmm10, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 736(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 160(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa 128(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa 704(%rdi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm9 = ymm4[0,1],ymm1[2],ymm4[3,4],ymm1[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm9[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm9, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0,1,2],xmm1[3],xmm14[4,5],xmm1[6],xmm14[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm11, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm10 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm10, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2],xmm1[3],xmm8[4,5],xmm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm8 = mem[0],ymm0[1],mem[2,3,4,5],ymm0[6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm8, %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm0, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm15[2],xmm7[3],xmm15[4,5],xmm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm15, %ymm0, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm14[3,4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # xmm1 = mem[0,1,2,3,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5,6,7],ymm7[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm14[2],ymm1[3,4,5,6],ymm14[7],ymm1[8,9],ymm14[10],ymm1[11,12,13,14],ymm14[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm14, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm14[2],xmm7[3],xmm14[4,5],xmm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3,4,5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm14, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0,1,2],xmm3[3],xmm14[4,5],xmm3[6],xmm14[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm8, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm6, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm15, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm10, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm4, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm3, %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm2 = mem[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm14, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7],ymm2[8,9],ymm3[10],ymm2[11,12,13,14],ymm3[15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm6, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm5, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm10, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm12, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm15, %ymm8, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm7, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm4[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2],xmm6[3],xmm5[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm0, %ymm3, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3,4,5,6,7],ymm6[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm14 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm0, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm15 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm3[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm12 = xmm2[0,1,2,3],xmm12[4],xmm2[5,6],xmm12[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[0,3,2,1]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm1 = xmm3[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0],xmm0[1,2],xmm13[3],xmm0[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5,6,7],ymm14[8,9,10],ymm13[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm14[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm0, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1,2],xmm14[3],xmm13[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm1, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0,1,2],ymm12[3,4,5,6,7],ymm14[8,9,10],ymm12[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm14[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm12[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm14[1,2,3,4],ymm13[5,6],ymm14[7],ymm13[8],ymm14[9,10,11,12],ymm13[13,14],ymm14[15]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm15 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm15, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm14[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm9[4],xmm0[5,6],xmm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm13 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm13, %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm14, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm13[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm9, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm8, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1,2],xmm12[3],xmm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm11 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm6, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm10, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1,2],xmm5[3],xmm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3,4,5,6,7],ymm13[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm9, %ymm15, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,3,2,3,4,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9,10,11,12],ymm0[13,14],ymm1[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm1[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm12, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,1,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,6,5,6,4]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5,6],xmm11[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm7, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[2,1,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm4 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm5, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1,2,3,4],ymm0[5,6],ymm10[7],ymm0[8],ymm10[9,10,11,12],ymm0[13,14],ymm10[15]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm5 = mem[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm5[4],xmm1[5,6],xmm5[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm13 = mem[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0],xmm1[1,2],xmm13[3],xmm1[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,1,0,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,1,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm8[0,1,2,3,6,5,6,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm0[4],xmm1[5,6],xmm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm1, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9,10,11,12],ymm0[13,14],ymm2[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm15, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm14[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm13[4],xmm2[5,6],xmm13[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm8, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1,2],xmm9[3],xmm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm10
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm10, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm3[2,1,2,0,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1,2],xmm9[3],xmm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1,2,3,4],xmm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm9 = ymm5[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0,1,2],ymm2[3,4,5,6,7],ymm9[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,5,4]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm9[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm8[1,2,3,4],ymm2[5,6],ymm8[7],ymm2[8],ymm8[9,10,11,12],ymm2[13,14],ymm8[15]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm0, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # xmm9 = mem[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm0, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # xmm13 = mem[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm13[0],xmm9[1,2],xmm13[3],xmm9[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm8 = ymm2[0,1,2],ymm8[3,4,5,6,7],ymm2[8,9,10],ymm8[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm9[4],xmm1[5,6],xmm9[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1,2],xmm4[3],xmm7[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm6, %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1,2,3,4],ymm2[5,6],ymm4[7],ymm2[8],ymm4[9,10,11,12],ymm2[13,14],ymm4[15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm10, %xmm12, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm5, %xmm7, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3,4],xmm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm6, %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1,2],xmm6[3],xmm1[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm15, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # xmm6 = mem[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5,6],xmm6[7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # xmm11 = mem[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm11[0],xmm6[1,2],xmm11[3],xmm6[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm1, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3,4,5,6,7],ymm11[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm11[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm12, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5,6],xmm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm7, %ymm5, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm10, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm5, %xmm3
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm2, %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3],xmm3[4],xmm9[5],xmm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, (%rsp), %ymm0, %ymm9 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm9 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm10, %xmm11
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm9, %xmm12
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm10, %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm9, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4],xmm12[5],xmm11[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
@@ -9787,8 +9194,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    # ymm12 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm13, %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm12, %xmm15
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm13, %xmm14
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm12, %xmm15
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5],xmm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
@@ -9797,34 +9204,34 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm15 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm15, %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm0, %xmm7
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm15, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm15, %xmm8
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5],xmm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm10, %xmm5
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1,2,3,4],ymm2[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm10, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0,1,2,3],xmm5[4],xmm9[5],xmm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm13, %xmm9
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm13, %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm12, %xmm10
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4],xmm10[5],xmm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm9 = mem[0,1,2,3,4],ymm9[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm1, %xmm15, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -9854,13 +9261,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 64(%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, (%rcx)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm8, 96(%r8)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, 96(%r8)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 32(%r8)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 64(%r8)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%r8)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm6, 96(%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, (%r8)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm7, 96(%r9)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm14, 32(%r9)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm11, 64(%r9)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm3, (%r9)
@@ -9868,239 +9275,214 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 96(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm9, 32(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm5, 64(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm4, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $1336, %rsp # imm = 0x538
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    addq $1272, %rsp # imm = 0x4F8
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride6_vf64:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $1544, %rsp # imm = 0x608
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    subq $1416, %rsp # imm = 0x588
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm17[0,2,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} ymm17 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm17[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 480(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6],ymm1[7],ymm0[8,9],ymm1[10],ymm0[11,12,13,14],ymm1[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm12, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 480(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} ymm21 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6],ymm1[7],ymm0[8,9],ymm1[10],ymm0[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm1, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm15[0,2,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm14, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 96(%rdi), %ymm0, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm19[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6],ymm1[7],ymm0[8,9],ymm1[10],ymm0[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0],ymm4[1],ymm1[2,3,4,5],ymm4[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[0,2,2,1,4,6,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, 288(%rdi), %ymm0, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm24[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6],ymm2[7],ymm0[8,9],ymm2[10],ymm0[11,12,13,14],ymm2[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0,1,2],xmm9[3],xmm14[4,5],xmm9[6],xmm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0,1],xmm0[2],xmm9[3],xmm0[4,5],xmm9[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm15, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm13[3],xmm9[4,5],xmm13[6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm7[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0,1,2],xmm11[3],xmm2[4,5],xmm11[6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm15, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm14, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1],xmm13[2],xmm14[3],xmm13[4,5],xmm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm6, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4,5],xmm3[6],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm8, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3,4,5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm22[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1],ymm0[2],ymm5[3,4,5,6],ymm0[7],ymm5[8,9],ymm0[10],ymm5[11,12,13,14],ymm0[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm27[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm27, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4,5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11,12,13,14],ymm0[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
@@ -10108,1187 +9490,1058 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm22[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, (%rsp), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1],ymm3[2],mem[3,4],ymm3[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm13 = mem[0,1],ymm4[2],mem[3],ymm4[4],mem[5,6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm9[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm17[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm7[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm2, %xmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm24[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm15[4],xmm2[5,6],xmm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm31, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm16, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5,6],ymm1[7],ymm2[8],ymm1[9,10,11,12],ymm2[13,14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm13[1,2],xmm2[3],xmm13[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm5, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0],ymm2[1,2,3,4],ymm13[5,6],ymm2[7],ymm13[8],ymm2[9,10,11,12],ymm13[13,14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3],xmm12[4],xmm13[5,6],xmm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm2[0,1,2],ymm12[3,4,5,6,7],ymm2[8,9,10],ymm12[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm16, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm24, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[1,1,1,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm8, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1,2,3,4],ymm6[5,6],ymm2[7],ymm6[8],ymm2[9,10,11,12],ymm6[13,14],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm14, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm7[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1,2,3,4],ymm2[5,6],ymm5[7],ymm2[8],ymm5[9,10,11,12],ymm2[13,14],ymm5[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm13, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm28, %zmm29, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm24, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm31, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm16, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm29, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm24, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = ymm3[0],mem[1],ymm3[2,3],mem[4],ymm3[5,6],mem[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10,11,12],ymm4[13],ymm2[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = ymm4[0,1],mem[2],ymm4[3,4],mem[5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm31, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm4, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm17 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm19[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm4[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10,11,12],ymm4[13],ymm2[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, (%rsp), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm6, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm19
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm29
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm30 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm31, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm21 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1,2,3],xmm6[4],xmm14[5],xmm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm6, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm27
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm0, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm31 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm7 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm10[4],xmm1[5],xmm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0],xmm10[1],xmm2[2,3],xmm10[4],xmm2[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm19[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm13, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm8, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0,1,2,3],xmm8[4],xmm13[5],xmm8[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3],xmm14[4],xmm10[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm8, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[1,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3],xmm6[4],xmm3[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5],xmm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm15[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4],ymm15[5],ymm14[6,7],ymm15[8],ymm14[9,10,11,12],ymm15[13],ymm14[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm22[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm15[1],xmm11[2,3],xmm15[4],xmm11[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm14, %ymm31, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm11, %zmm3 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm14[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0],ymm14[1,2,3,4],ymm11[5],ymm14[6,7],ymm11[8],ymm14[9,10,11,12],ymm11[13],ymm14[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5],xmm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[1,1,0,3,5,5,4,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm15[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4],ymm15[5],ymm14[6,7],ymm15[8],ymm14[9,10,11,12],ymm15[13],ymm14[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm13[1],xmm0[2,3],xmm13[4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm14, %ymm31, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm10, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm11[4],xmm9[5],xmm11[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm9, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm8[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm7[1,2,3,4],ymm0[5],ymm7[6,7],ymm0[8],ymm7[9,10,11,12],ymm0[13],ymm7[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm10, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4],ymm2[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rsi)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 64(%rsi)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 64(%rdx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm2, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm2, %zmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, 64(%rcx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm30, %zmm4, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 64(%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm30, 64(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 64(%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 64(%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, (%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $1544, %rsp # imm = 0x608
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    addq $1416, %rsp # imm = 0x588
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1528, %rsp # imm = 0x5F8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    subq $1512, %rsp # imm = 0x5E8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,0,1,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 480(%rdi), %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,1,4,5,u,u,6,7,8,9,8,9,8,9,u,u,16,17,20,21,u,u,22,23,24,25,24,25,24,25,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 480(%rdi), %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 640(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,8,9,u,u,u,u,u,u,u,u,4,5,u,u,u,u,24,25,u,u,u,u,u,u,u,u,20,21>
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 672(%rdi), %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm25
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 640(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 672(%rdi), %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm8, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm26
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm13, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm15[2],xmm0[3],xmm15[4,5],xmm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm10, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, 288(%rdi), %ymm0, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm25[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1],ymm3[2],ymm0[3,4,5,6],ymm3[7],ymm0[8,9],ymm3[10],ymm0[11,12,13,14],ymm3[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0,1,2],xmm0[3],xmm14[4,5],xmm0[6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7],ymm9[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm3[0,1,2],xmm12[3],xmm3[4,5],xmm12[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,3,6,7,u,u,6,7,10,11,10,11,10,11,u,u,18,19,22,23,u,u,22,23,26,27,26,27,26,27,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4,5,6],ymm6[7],ymm4[8,9],ymm6[10],ymm4[11,12,13,14],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3,4,5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3,4,5],ymm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,10,11,u,u,u,u,u,u,u,u,6,7,u,u,u,u,26,27,u,u,u,u,u,u,u,u,22,23>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4,5,6],ymm6[7],ymm4[8,9],ymm6[10],ymm4[11,12,13,14],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7],ymm4[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5],ymm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm11[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm31
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm24
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm10, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm3[1,2,3,4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9,10,11,12],ymm1[13,14],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm6 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm23
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm8[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm9, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3,4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9,10,11,12],ymm6[13,14],ymm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm7 = ymm7[0,1],mem[2],ymm7[3,4],mem[5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm9, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm8[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm9[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm9, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5,6],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm30, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm7, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm13, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm29, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm4[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm15 = xmm1[0,1,2,3],xmm15[4],xmm1[5,6],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm11[0,1,2],ymm15[3,4,5,6,7],ymm11[8,9,10],ymm15[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm0[1,2],xmm11[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm11, %xmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1,2],xmm13[3],xmm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3],xmm11[4],xmm13[5,6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3,4,5,6,7],ymm14[8,9,10],ymm11[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1,2],xmm10[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm24
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = mem[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm11[0,3,2,3,4,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm5[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm12[4],xmm0[5,6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm25, %zmm30, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm31
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm1 = mem[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm2[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm12[4],xmm1[5,6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0],ymm0[1,2,3,4],ymm11[5,6],ymm0[7],ymm11[8],ymm0[9,10,11,12],ymm11[13,14],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm10, %xmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm1[1,2],xmm10[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3,4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9,10,11,12],ymm10[13,14],ymm1[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm4[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5,6],xmm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm1[0,1,2],ymm10[3,4,5,6,7],ymm1[8,9,10],ymm10[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm12, %zmm30, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm4[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm10[4],xmm1[5,6],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1,2],xmm9[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1,2,3,4],ymm9[5,6],ymm8[7],ymm9[8],ymm8[9,10,11,12],ymm9[13,14],ymm8[15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm8[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm30, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm29, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm25, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, (%rsp), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm13 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm17, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm3, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm30 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm26[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0],ymm3[1],mem[2,3],ymm3[4],mem[5,6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm28
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm10[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm30 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm22
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm3, %ymm17, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm7 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm20 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,3,4,5,4,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,1,2,1,6,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1,2,3],xmm6[4],xmm14[5],xmm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm15[1,2,3,4],ymm0[5],ymm15[6,7],ymm0[8],ymm15[9,10,11,12],ymm0[13],ymm15[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm14 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm3 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm31 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm7 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm8[4],xmm0[5],xmm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm11, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm17, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm12, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm11[0],xmm1[1],xmm11[2,3],xmm1[4],xmm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0,1,2,3],xmm1[4],xmm13[5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0,1,2,3],xmm1[4],xmm15[5],xmm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10,11,12],ymm0[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0,1,2,3],xmm2[4],xmm13[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2,3,4],ymm2[5],ymm13[6,7],ymm2[8],ymm13[9,10,11,12],ymm2[13],ymm13[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm13[1],xmm8[2,3],xmm13[4],xmm8[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm2, %ymm17, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm9, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm9[1,2,3,4],ymm6[5],ymm9[6,7],ymm6[8],ymm9[9,10,11,12],ymm6[13],ymm9[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm15, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, (%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm3, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm3, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm3, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm4, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 64(%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, 64(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, (%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 64(%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1528, %rsp # imm = 0x5F8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1512, %rsp # imm = 0x5E8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i16_stride6_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $968, %rsp # imm = 0x3C8
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    subq $840, %rsp # imm = 0x348
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm28[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm25[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
@@ -11297,217 +10550,190 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm20
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 480(%rdi), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm8, %xmm24
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm24[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm2
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm3, %xmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm21[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
 ; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 480(%rdi), %ymm1, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm3[1],ymm1[2,3,4,5],ymm3[6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm3, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm20
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm5, %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm16
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm18, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm19
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 672(%rdi), %ymm2, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm3
 ; AVX512DQ-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm2
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4,5,6],ymm1[7],ymm0[8,9],ymm1[10],ymm0[11,12,13,14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1,2],ymm0[3,4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[0,2,2,1,4,6,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm29
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, 288(%rdi), %ymm0, %ymm31
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm31[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm6[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6],ymm2[7],ymm0[8,9],ymm2[10],ymm0[11,12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0,1,2],xmm9[3],xmm14[4,5],xmm9[6],xmm14[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm18, %zmm3
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0,1],xmm0[2],xmm9[3],xmm0[4,5],xmm9[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm15, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm13[3],xmm9[4,5],xmm13[6],xmm9[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm21[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm9[2],ymm5[3,4,5,6],ymm9[7],ymm5[8,9],ymm9[10],ymm5[11,12,13,14],ymm9[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm9[2],xmm4[3],xmm9[4,5],xmm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3,4,5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm29[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm29, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm6[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm18, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm20, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11,12,13,14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm7[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0,1,2],xmm10[3],xmm2[4,5],xmm10[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm2, %ymm12
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm16, %zmm6
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm10, %zmm0, %zmm6 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm14, %xmm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm12[2],xmm13[3],xmm12[4,5],xmm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2],xmm3[3],xmm6[4,5],xmm3[6],xmm6[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm3, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm5, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm6[2],xmm1[3],xmm6[4,5],xmm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2],ymm5[3,4,5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm5
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm25, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm18, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2],ymm2[3,4,5],ymm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -11517,1121 +10743,989 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm20
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm22
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm17
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm21[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm25
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm24
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm31[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1],ymm0[2],ymm3[3],ymm0[4],ymm3[5,6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm16
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1],ymm3[2],mem[3,4],ymm3[5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm23
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm4[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm10[0,1,2,3,6,5,6,4]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm26, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, %zmm31
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm17
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm21
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm9[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm16[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,3,2,3,4,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm7[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9,10,11,12],ymm1[13,14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm2, %xmm20
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm20[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm15[4],xmm2[5,6],xmm15[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm26, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm31, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm14, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm17
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5,6],ymm1[7],ymm2[8],ymm1[9,10,11,12],ymm2[13,14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm14
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm5, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0],ymm2[1,2,3,4],ymm14[5,6],ymm2[7],ymm14[8],ymm2[9,10,11,12],ymm14[13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm5[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1,2,3],xmm13[4],xmm14[5,6],xmm13[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm2[0,1,2],ymm13[3,4,5,6,7],ymm2[8,9,10],ymm13[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm13[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm26, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm20, %zmm31
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm12[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[1,1,1,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm10[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm13, %ymm13
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm21, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3],xmm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm8, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm2[1,2,3,4],ymm6[5,6],ymm2[7],ymm6[8],ymm2[9,10,11,12],ymm6[13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm10, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm7[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1,2,3,4],ymm2[5,6],ymm5[7],ymm2[8],ymm5[9,10,11,12],ymm2[13,14],ymm5[15]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm26, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm20, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm27, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm17
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm5, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm26
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10,11,12],ymm4[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm9 = mem[0,1],ymm4[2],mem[3,4],ymm4[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm4
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm2[1],ymm0[2,3,4,5],ymm2[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm8 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm30
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm21, %ymm4, %ymm0
 ; AVX512DQ-SLOW-NEXT:    movw $31, %ax
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm24 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[0,1,0,3,4,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm4[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3,4],ymm4[5],ymm2[6,7],ymm4[8],ymm2[9,10,11,12],ymm4[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm4 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm18
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm17
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm8 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm19
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm0, %ymm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm22 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0],ymm4[1],ymm0[2,3,4,5],ymm4[6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3],xmm4[4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm4 = mem[0],ymm4[1],mem[2,3],ymm4[4],mem[5,6],ymm4[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm4[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm2, %zmm0, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm10[2,1,2,1,6,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm10 = mem[0,1,0,3,4,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm10[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4],ymm2[5],ymm1[6,7],ymm2[8],ymm1[9,10,11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0,1,2,3],xmm5[4],xmm15[5],xmm5[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm5[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm16
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm14 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm15 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm12
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm21, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm6 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5],xmm5[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm25
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm25 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm11[4],xmm1[5],xmm11[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm11
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm14[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1],xmm3[2,3],xmm5[4],xmm3[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm14, %xmm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,1,1,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm15[4],xmm14[5],xmm15[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm14, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm14 = mem[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm15[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4],ymm15[5],ymm14[6,7],ymm15[8],ymm14[9,10,11,12],ymm15[13],ymm14[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm25[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm15[1],xmm9[2,3],xmm15[4],xmm9[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm14, %ymm23, %ymm9
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm9, %zmm0, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm14[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm14[1,2,3,4],ymm9[5],ymm14[6,7],ymm9[8],ymm14[9,10,11,12],ymm9[13],ymm14[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm14, %xmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,1,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5],xmm14[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm14[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpshufd $197, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm14 = mem[1,1,0,3,5,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm15[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm14[1,2,3,4],ymm15[5],ymm14[6,7],ymm15[8],ymm14[9,10,11,12],ymm15[13],ymm14[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm13[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm3[0],xmm11[1],xmm3[2,3],xmm11[4],xmm3[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,1,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm26
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm19[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm15[1],xmm8[2,3],xmm15[4],xmm8[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm14, %ymm23, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm13[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm12, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,1,1,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0,1,2,3],xmm1[4],xmm11[5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm8, %zmm0, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,3,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm10[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1,2,3,4],ymm1[5],ymm7[6,7],ymm1[8],ymm7[9,10,11,12],ymm1[13],ymm7[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm30[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm11[1],xmm8[2,3],xmm11[4],xmm8[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm0, %ymm13
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm21, %ymm13, %ymm8
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm8, %zmm0, %zmm26 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[0,1,1,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0,1,2,3],xmm8[4],xmm13[5],xmm8[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm11, %ymm14, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3],xmm12[4],xmm11[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm21, %ymm8, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm10[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm11, %zmm0, %zmm1 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, 64(%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, 64(%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, (%rdx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm2, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm6, %zmm2, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, 64(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm31, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, 64(%r9)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, (%r9)
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, 64(%rsi)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, 64(%rdx)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, (%rdx)
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm27, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm25, %zmm27, %zmm23
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm26, %zmm27, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm27, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm1, (%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, (%r8)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm23, 64(%r9)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, (%r9)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQ-SLOW-NEXT:    addq $968, %rsp # imm = 0x3C8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-SLOW-NEXT:    addq $840, %rsp # imm = 0x348
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride6_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1032, %rsp # imm = 0x408
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    subq $936, %rsp # imm = 0x3A8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512DQ-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm24
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,0,1,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm28
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 480(%rdi), %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,1,4,5,u,u,6,7,8,9,8,9,8,9,u,u,16,17,20,21,u,u,22,23,24,25,24,25,24,25,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm26
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm24
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm2[3,4,5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 640(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,8,9,u,u,u,u,u,u,u,u,4,5,u,u,u,u,24,25,u,u,u,u,u,u,u,u,20,21>
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm15, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm21
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 672(%rdi), %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 480(%rdi), %ymm2, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm2[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm4[1],ymm2[2,3,4,5],ymm4[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,12,13,8,9,12,13,8,9,12,13,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7],ymm2[8,9],ymm3[10],ymm2[11,12,13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm14, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm17
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm18, %zmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm19
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 640(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm2[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 672(%rdi), %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm5[1],ymm2[2,3,4,5],ymm5[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm4
 ; AVX512DQ-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm3
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm16
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm0[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1],ymm3[2],ymm1[3,4,5,6],ymm3[7],ymm1[8,9],ymm3[10],ymm1[11,12,13,14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm13
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm13[2],xmm0[3],xmm13[4,5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2],ymm3[3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm0[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, 288(%rdi), %ymm0, %ymm25
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm25[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1],ymm3[2],ymm0[3,4,5,6],ymm3[7],ymm0[8,9],ymm3[10],ymm0[11,12,13,14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm4[3,4,5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2],xmm0[3],xmm15[4,5],xmm0[6],xmm15[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7],ymm9[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1],xmm8[2],xmm0[3],xmm8[4,5],xmm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm3[3],xmm12[4,5],xmm3[6],xmm12[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm17, %zmm16, %zmm6
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3],xmm6[4,5],xmm5[6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,3,6,7,u,u,6,7,10,11,10,11,10,11,u,u,18,19,22,23,u,u,22,23,26,27,26,27,26,27,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm10[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4,5,6],ymm7[7],ymm4[8,9],ymm7[10],ymm4[11,12,13,14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2],xmm2[3],xmm7[4,5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm4[3,4,5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,10,11,u,u,u,u,u,u,u,u,6,7,u,u,u,u,26,27,u,u,u,u,u,u,u,u,22,23>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4,5,6],ymm7[7],ymm2[8,9],ymm7[10],ymm2[11,12,13,14],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0,1,2],ymm7[3,4,5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm18, %zmm8
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7],ymm2[8,9],ymm3[10],ymm2[11,12,13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm6[2],xmm3[3],xmm6[4,5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm2[3,4,5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4,5,6],ymm3[7],ymm2[8,9],ymm3[10],ymm2[11,12,13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm16, %zmm7
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm7 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2],ymm3[3,4,5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7],ymm2[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm18, %zmm6
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm4
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm4 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm29
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,2,0,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm20
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm9, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm27[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm3[1,2,3,4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9,10,11,12],ymm1[13,14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm31
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm21[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm8, %ymm23
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1,2,3,4],ymm4[5,6],ymm5[7],ymm4[8],ymm5[9,10,11,12],ymm4[13,14],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = ymm5[0,1],mem[2],ymm5[3,4],mem[5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm5[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm20
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm8[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm22
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm8[4],xmm5[5,6],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0,1,2],ymm5[3,4,5,6,7],ymm4[8,9,10],ymm5[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm24, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm18
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm18
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm7[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm17
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm8, %ymm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm21, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm4[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1,2],xmm0[3],xmm3[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm15 = xmm1[0,1,2,3],xmm15[4],xmm1[5,6],xmm15[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm11
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm11[0,1,2],ymm15[3,4,5,6,7],ymm11[8,9,10],ymm15[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,6,5,4]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm0[1,2],xmm11[3],xmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm11, %xmm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1,2],xmm13[3],xmm15[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3],xmm11[4],xmm13[5,6],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm14
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3,4,5,6,7],ymm14[8,9,10],ymm11[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm29
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm20, %zmm21, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1,2],xmm10[3],xmm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5,6],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm27
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm26
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm15, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm25[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm27, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,3,2,3,4,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm11, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm7[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm14[4],xmm0[5,6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm21, %zmm24, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm29
-; AVX512DQ-FAST-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm1 = mem[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm2[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm16
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0],ymm0[1,2,3,4],ymm13[5,6],ymm0[7],ymm13[8],ymm0[9,10,11,12],ymm13[13,14],ymm0[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0],xmm1[1,2],xmm12[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm12[0],ymm1[1,2,3,4],ymm12[5,6],ymm1[7],ymm12[8],ymm1[9,10,11,12],ymm12[13,14],ymm1[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm9, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3],xmm13[4],xmm12[5,6],xmm13[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm1[0,1,2],ymm12[3,4,5,6,7],ymm1[8,9,10],ymm12[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm14, %zmm24, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm10, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm10[4],xmm1[5,6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm9
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1,2],xmm9[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1,2,3,4],ymm9[5,6],ymm8[7],ymm9[8],ymm8[9,10,11,12],ymm9[13,14],ymm8[15]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm8[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4],ymm2[5,6],ymm3[7],ymm2[8],ymm3[9,10,11,12],ymm2[13,14],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm22
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm21, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, (%rsp), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm12 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, (%rsp) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm27
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm3
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm26
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm22
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm11 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm3, %ymm1
 ; AVX512DQ-FAST-NEXT:    movw $31, %ax
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm3, %zmm0, %zmm23 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0],ymm3[1],mem[2,3],ymm3[4],mem[5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm24
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm10[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm25[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm30
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm28
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3,4],ymm3[5],ymm1[6,7],ymm3[8],ymm1[9,10,11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm9 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm4
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm21
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm17, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm22 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm20
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2,3],xmm5[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $146, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm27
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm5[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm19
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5],xmm5[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm16
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm16 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm13 = mem[0,1,0,3,4,5,4,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm8[2,1,2,1,6,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm10[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4],ymm2[5],ymm0[6,7],ymm2[8],ymm0[9,10,11,12],ymm2[13],ymm0[14,15]
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0],ymm1[1],mem[2,3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm6[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm14 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm17
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0],ymm2[1],mem[2,3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm24
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm24 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0],ymm8[1,2,3,4],ymm0[5],ymm8[6,7],ymm0[8],ymm8[9,10,11,12],ymm0[13],ymm8[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm15, %ymm17, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm15 = xmm14[0],xmm11[1],xmm14[2,3],xmm11[4],xmm14[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm12 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0,1,2,3],xmm1[4],xmm12[5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3],xmm8[4],xmm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0,1,2,3],xmm4[4],xmm15[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm26
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm8, %zmm0, %zmm26 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0,1,2,3],xmm4[4],xmm8[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm15
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3,4],ymm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm15, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5],xmm3[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm12[1,2,3,4],ymm0[5],ymm12[6,7],ymm0[8],ymm12[9,10,11,12],ymm0[13],ymm12[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm15[0,1,2,3],xmm12[4],xmm15[5],xmm12[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm12[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm12
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm15, %ymm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm15[1,2,3,4],ymm12[5],ymm15[6,7],ymm12[8],ymm15[9,10,11,12],ymm12[13],ymm15[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm9, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm15[1],xmm9[2,3],xmm15[4],xmm9[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm12, %ymm17, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3],xmm8[4],xmm2[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0,1,2,3],xmm3[4],xmm8[5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm9, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm13, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm6[1,2,3,4],ymm3[5],ymm6[6,7],ymm3[8],ymm6[9,10,11,12],ymm3[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, 64(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, 64(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm4, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm4, %zmm24
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm16, %zmm4, %zmm25
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 64(%rdx)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm2, %zmm23
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm24, %zmm2, %zmm25
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, 64(%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, (%rcx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 64(%r8)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, (%r8)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 64(%r8)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, (%r8)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $1032, %rsp # imm = 0x408
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512DQ-FAST-NEXT:    addq $936, %rsp # imm = 0x3A8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
index 5da2a75454b5c..07a51fb5a6ea8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-5.ll
@@ -458,7 +458,7 @@ define void @load_i32_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[0,1,2,0,4,5,6,4]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm10[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vextractf128 $1, %ymm2, %xmm2
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],mem[1],xmm2[2,3]
@@ -502,7 +502,7 @@ define void @load_i32_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm8, %ymm7, %ymm7
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 96(%rdi), %ymm0, %ymm8
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1,2],ymm8[3],ymm3[4,5,6],ymm8[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1,2,3,4,5,6],ymm8[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5],ymm7[6,7]
 ; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [21474836480,21474836480,21474836480,21474836480]
@@ -517,7 +517,7 @@ define void @load_i32_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5],ymm10[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm3[0,1],ymm4[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <4,1,6,u>
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm4, %ymm1
@@ -924,7 +924,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[0,1,2,0,4,5,6,4]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5],ymm5[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
@@ -934,7 +934,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0],ymm11[1],ymm2[2,3,4],ymm11[5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm11[5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm12[4,5],ymm7[6,7]
 ; AVX1-ONLY-NEXT:    vextractf128 $1, %ymm4, %xmm4
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm4[0],mem[1],xmm4[2,3]
@@ -1012,7 +1012,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1,2,3],ymm5[4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm14, %ymm13
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 96(%rdi), %ymm0, %ymm15
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm15 = ymm8[0,1,2],ymm15[3],ymm8[4,5,6],ymm15[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm15 = ymm8[0,1,2,3,4,5,6],ymm15[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm15[3,4,5],ymm13[6,7]
 ; AVX2-ONLY-NEXT:    vpbroadcastq {{.*#+}} ymm15 = [21474836480,21474836480,21474836480,21474836480]
@@ -1021,7 +1021,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm7, %ymm14, %ymm7
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 256(%rdi), %ymm0, %ymm14
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3],ymm1[4,5,6],ymm14[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3,4,5,6],ymm14[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm14[3,4,5],ymm7[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm14
@@ -1043,7 +1043,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm14, %ymm15, %ymm12
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm8[0,1],ymm9[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3,4],ymm9[5],ymm8[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5],ymm5[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,1,6,u>
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm5, %ymm3
@@ -1054,7 +1054,7 @@ define void @load_i32_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm5, %ymm3
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm14, %ymm8, %ymm2
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
@@ -1959,8 +1959,8 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
+; AVX1-ONLY-NEXT:    vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0,1,2,3,4],mem[5],ymm7[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm15 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm15 = mem[0,1,2,3],ymm3[4,5],mem[6,7]
@@ -1971,7 +1971,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # ymm3 = mem[0,1,2,3,4,5,6],ymm6[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm2[1],ymm6[2,3,4],ymm2[5],ymm6[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm2[5],ymm6[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
@@ -1982,7 +1982,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2,3,4,5,6],ymm8[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0],ymm1[1],ymm5[2,3,4],ymm1[5],ymm5[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm1[5],ymm5[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm6 = mem[0,1,2,3],ymm6[4,5],mem[6,7]
@@ -1992,7 +1992,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1,2,3,4,5,6],ymm4[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm10[1],ymm4[2,3,4],ymm10[5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm10[5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm5 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
@@ -2161,7 +2161,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3],ymm12[4,5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm13, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 256(%rdi), %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6],ymm2[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5,6],ymm2[7]
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm5, %ymm7
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
@@ -2174,7 +2174,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm9[4,5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm13, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 576(%rdi), %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3],ymm6[4,5,6],ymm2[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5,6],ymm2[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 608(%rdi), %ymm9
@@ -2189,7 +2189,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 96(%rdi), %ymm0, %ymm4
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm14, %ymm2
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0,1,2],ymm4[3],ymm14[4,5,6],ymm4[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0,1,2,3,4,5,6],ymm4[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm14
@@ -2202,7 +2202,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm13, %ymm4
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 416(%rdi), %ymm0, %ymm8
 ; AVX2-ONLY-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1,2],ymm8[3],ymm1[4,5,6],ymm8[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1,2,3,4,5,6],ymm8[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1,2],ymm8[3,4,5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm4
@@ -2258,7 +2258,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[0,1],ymm13[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm13[1],ymm7[2,3,4],ymm13[5],ymm7[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm13[5],ymm7[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,1,6,u>
 ; AVX2-ONLY-NEXT:    vpermd %ymm0, %ymm8, %ymm0
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3,4,5],ymm0[6,7]
@@ -2268,7 +2268,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm6[4,5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm9 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm9 = mem[0,1],ymm12[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4],ymm12[5],ymm9[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm12[5],ymm9[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm8, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm2 # 32-byte Folded Reload
@@ -2276,7 +2276,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, (%rsp), %ymm5, %ymm9 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm9 = mem[0,1],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0],ymm5[1],ymm9[2,3,4],ymm5[5],ymm9[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm5[5],ymm9[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm8, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm9[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm7, %ymm1
@@ -2284,7 +2284,7 @@ define void @load_i32_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm3 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm3 = mem[0,1],ymm4[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm4[5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vpblendd $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm5 = ymm4[0,1,2,3],mem[4,5],ymm4[6,7]
@@ -4116,7 +4116,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} ymm15 = ymm15[0,1,2,0,4,5,6,4]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0,1,2,3,4,5],ymm15[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5],mem[6,7]
@@ -4127,7 +4127,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm9[1],ymm3[2,3,4],ymm9[5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm9[5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm4 = ymm4[0,1,2,3],mem[4,5],ymm4[6,7]
@@ -4139,7 +4139,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # ymm4 = mem[0,1,2,3,4,5,6],ymm1[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm2[1],ymm4[2,3,4],ymm2[5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm2[5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5],mem[6,7]
@@ -4150,8 +4150,8 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm4 = ymm4[0],mem[1],ymm4[2,3,4],mem[5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm4 = ymm4[0,1,2,3,4],mem[5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
@@ -4163,7 +4163,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # ymm6 = mem[0,1,2,3,4,5,6],ymm2[7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm5[5],ymm6[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5],mem[6,7]
@@ -4175,8 +4175,8 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm5 = ymm5[0],mem[1],ymm5[2,3,4],mem[5],ymm5[6,7]
+; AVX1-ONLY-NEXT:    vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm5 = ymm5[0,1,2,3,4],mem[5],ymm5[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm6 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
@@ -4189,7 +4189,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm14[5],ymm6[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2,3],ymm7[4,5],mem[6,7]
@@ -4202,8 +4202,8 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0],mem[1],ymm7[2,3,4],mem[5],ymm7[6,7]
+; AVX1-ONLY-NEXT:    vblendps $32, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0,1,2,3,4],mem[5],ymm7[6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm8 = ymm8[0,1,2,3],mem[4,5],ymm8[6,7]
@@ -4521,7 +4521,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm0, %ymm2, %ymm0
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 256(%rdi), %ymm0, %ymm1
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm10, %ymm4
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3],ymm10[4,5,6],ymm1[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3,4,5,6],ymm1[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm3
@@ -4535,7 +4535,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    # ymm1 = ymm10[0,1,2,3],mem[4,5],ymm10[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 576(%rdi), %ymm0, %ymm3
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2],ymm3[3],ymm14[4,5,6],ymm3[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1,2,3,4,5,6],ymm3[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 608(%rdi), %ymm3
@@ -4549,7 +4549,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 896(%rdi), %ymm0, %ymm3
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3],ymm5[4,5,6],ymm3[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5,6],ymm3[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 928(%rdi), %ymm3
@@ -4562,7 +4562,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 1216(%rdi), %ymm0, %ymm3
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1,2],ymm3[3],ymm9[4,5,6],ymm3[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1,2,3,4,5,6],ymm3[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 1248(%rdi), %ymm3
@@ -4573,7 +4573,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm8[4,5],ymm13[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 1056(%rdi), %ymm0, %ymm3
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0,1,2],ymm3[3],ymm15[4,5,6],ymm3[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0,1,2,3,4,5,6],ymm3[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 1088(%rdi), %ymm8
@@ -4586,8 +4586,8 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3],mem[4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 96(%rdi), %ymm0, %ymm3
-; AVX2-ONLY-NEXT:    vpblendd $119, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-ONLY-NEXT:    # ymm3 = mem[0,1,2],ymm3[3],mem[4,5,6],ymm3[7]
+; AVX2-ONLY-NEXT:    vpblendd $127, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    # ymm3 = mem[0,1,2,3,4,5,6],ymm3[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm1
@@ -4598,8 +4598,8 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    # ymm3 = mem[0,1,2,3],ymm11[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm2, %ymm3
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 416(%rdi), %ymm0, %ymm15
-; AVX2-ONLY-NEXT:    vpblendd $119, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
-; AVX2-ONLY-NEXT:    # ymm15 = mem[0,1,2],ymm15[3],mem[4,5,6],ymm15[7]
+; AVX2-ONLY-NEXT:    vpblendd $127, (%rsp), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    # ymm15 = mem[0,1,2,3,4,5,6],ymm15[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm15 = ymm3[0,1,2],ymm15[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 448(%rdi), %ymm3
@@ -4613,7 +4613,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm13, %ymm2, %ymm2
 ; AVX2-ONLY-NEXT:    vinserti128 $1, 736(%rdi), %ymm0, %ymm13
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3],ymm14[4,5,6],ymm13[7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
 ; AVX2-ONLY-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,3,0,1,6,7,4,5]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1,2],ymm13[3,4,5],ymm2[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 768(%rdi), %ymm2
@@ -4713,7 +4713,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm10[0,1],ymm7[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3,4],ymm7[5],ymm0[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm7[5],ymm0[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <4,1,6,u>
 ; AVX2-ONLY-NEXT:    vpermd %ymm4, %ymm7, %ymm4
@@ -4727,14 +4727,14 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm4 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm4 = mem[0,1],ymm10[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm10[1],ymm4[2,3,4],ymm10[5],ymm4[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm10[5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm7, %ymm1
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm4 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3,4,5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm9[0,1],ymm8[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3,4],ymm8[5],ymm4[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm8[5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm1, %ymm7, %ymm1
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3,4,5],ymm1[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm5, %ymm6, %ymm3
@@ -4745,7 +4745,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm4 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm4 = mem[0,1],ymm8[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3,4],ymm8[5],ymm4[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm8[5],ymm4[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm7, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm4 # 32-byte Folded Reload
@@ -4753,7 +4753,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm14[4,5],ymm11[6,7]
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm8 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm8 = mem[0,1],ymm12[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2,3,4],ymm12[5],ymm8[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm12[5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm7, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm2 # 32-byte Folded Reload
@@ -4764,7 +4764,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm8 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm8 = mem[0,1],ymm10[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2,3,4],ymm10[5],ymm8[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm10[5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm3, %ymm7, %ymm3
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3,4,5],ymm3[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm8 # 32-byte Folded Reload
@@ -4775,7 +4775,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm10 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm10 = mem[0,1],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0],ymm5[1],ymm10[2,3,4],ymm5[5],ymm10[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm5[5],ymm10[6,7]
 ; AVX2-ONLY-NEXT:    vpermd %ymm8, %ymm7, %ymm8
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3,4,5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm9 # 32-byte Folded Reload
@@ -4787,7 +4787,7 @@ define void @load_i32_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vperm2i128 $2, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm9 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm9 = mem[0,1],ymm10[0,1]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5],ymm9[6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4,5],ymm7[6,7]
 ; AVX2-ONLY-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
index 8c0cc12f8429f..6daf83dbb7e91 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll
@@ -623,29 +623,26 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm5
 ; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm6
+; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm2 = <0,6,4,u>
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm6[0,1],ymm5[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [8589934596,8589934596,8589934596,8589934596]
 ; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm10, %ymm10
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm10 = <1,7,5,u>
-; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm10, %ymm7
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2,3,4],ymm10[5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm10, %ymm6
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5],ymm6[6,7]
 ; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm8 = [12884901893,12884901893,12884901893,12884901893]
 ; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm8, %ymm8
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm3[2,3,2,3,6,7,6,7]
@@ -659,7 +656,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm6[1],ymm9[2,3,4],ymm6[5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm3[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
@@ -672,20 +669,20 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 80(%rdi), %xmm10
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm5[0,1],ymm10[2,3],ymm5[4,5],ymm10[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,1,0,2,4,5,4,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm11 = <4,2,u,u>
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm11, %ymm4
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6],ymm10[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm5 = <5,3,u,u>
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm5, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
@@ -694,7 +691,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm4, %ymm0
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX2-SLOW-NEXT:    vmovaps %ymm7, (%rdx)
+; AVX2-SLOW-NEXT:    vmovaps %ymm6, (%rdx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm8, (%rcx)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%r8)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, (%r9)
@@ -710,29 +707,26 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm5
 ; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm6
+; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm7
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm2 = <0,6,4,u>
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm2, %ymm2
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm6[0,1],ymm5[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [8589934596,8589934596,8589934596,8589934596]
 ; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm10, %ymm10
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm10 = <1,7,5,u>
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm10, %ymm7
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2,3,4],ymm10[5],ymm8[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5],ymm7[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm10, %ymm6
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5],ymm6[6,7]
 ; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm8 = [12884901893,12884901893,12884901893,12884901893]
 ; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm8, %ymm8
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm9 = <u,u,u,4,2,u,u,u>
 ; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm9, %ymm8
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm3[2,3,2,3,6,7,6,7]
@@ -746,7 +740,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm6[1],ymm9[2,3,4],ymm6[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm10 = ymm3[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
@@ -759,20 +753,20 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps 80(%rdi), %xmm10
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm5[0,1],ymm10[2,3],ymm5[4,5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,1,0,2,4,5,4,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm11 = <4,2,u,u>
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm11, %ymm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6],ymm10[7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm5 = <5,3,u,u>
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm5, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
@@ -781,7 +775,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm4, %ymm0
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX2-FAST-NEXT:    vmovaps %ymm7, (%rdx)
+; AVX2-FAST-NEXT:    vmovaps %ymm6, (%rdx)
 ; AVX2-FAST-NEXT:    vmovaps %ymm8, (%rcx)
 ; AVX2-FAST-NEXT:    vmovaps %ymm9, (%r8)
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, (%r9)
@@ -797,29 +791,26 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm2 = <0,6,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm2, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm6[0,1],ymm5[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm5[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm8[0,2,2,2,4,6,6,6]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3,4,5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [8589934596,8589934596,8589934596,8589934596]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm10, %ymm10
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm10[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm10 = <1,7,5,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm10, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2,3,4],ymm10[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3,4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm10, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5],ymm6[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm8 = [12884901893,12884901893,12884901893,12884901893]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm8, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm8 = ymm8[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm3[2,3,2,3,6,7,6,7]
@@ -833,7 +824,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm6[1],ymm9[2,3,4],ymm6[5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm7[1],ymm9[2,3,4],ymm7[5],ymm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm3[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm11 = ymm4[0,1,3,3,4,5,7,7]
@@ -846,20 +837,20 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm10 = ymm10[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rdi), %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm5[0,1],ymm10[2,3],ymm5[4,5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[0,1,0,2,4,5,4,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm11 = <4,2,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm11, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [0,2,0,6,0,2,0,6]
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm1, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm4 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6],ymm10[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm10[3],ymm4[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm5 = <5,3,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm5, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
@@ -868,7 +859,7 @@ define void @load_i32_stride6_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm4, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, (%rdx)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, (%rdx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm8, (%rcx)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm9, (%r8)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, (%r9)
@@ -1424,151 +1415,146 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    subq $264, %rsp # imm = 0x108
 ; AVX2-SLOW-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm7
-; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm5
-; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovaps 224(%rdi), %ymm10
+; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm11
+; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm3
+; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm7
+; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm14 = <0,6,4,u>
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm14, %ymm0
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm3[0,1],ymm5[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm12 = ymm11[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm7[4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm6, %ymm13
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm4
+; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm1 = <0,6,4,u>
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm1, %ymm9
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm5[0,1],ymm7[0,1]
+; AVX2-SLOW-NEXT:    vmovaps %ymm5, %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm13[3,4,5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-SLOW-NEXT:    vpermps %ymm13, %ymm0, %ymm14
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm10[4,5],ymm11[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm1, %ymm14
+; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm5[0,1],ymm8[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 320(%rdi), %ymm14
+; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm15
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm9[0,1],ymm8[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 224(%rdi), %ymm15
-; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm4
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm15[4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm14, %ymm14
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 320(%rdi), %ymm13
-; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm14
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm6, %ymm6
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm6 = <1,7,5,u>
-; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm6, %ymm10
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5],ymm10[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm10, %ymm11
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm11[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm6, %ymm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm8[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm10, %ymm1
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm0, %ymm3
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm12[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3,4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-SLOW-NEXT:    vpermps %ymm13, %ymm6, %ymm12
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm12[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm6, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm7[2,3],ymm4[4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vmovaps %ymm4, %ymm12
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm8[2,3],ymm5[4,5],ymm8[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vmovaps %ymm3, %ymm11
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vmovaps %ymm7, %ymm10
+; AVX2-SLOW-NEXT:    vmovaps %ymm3, %ymm9
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm4[4,5],ymm15[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 272(%rdi), %xmm3
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm4 = <4,2,u,u>
 ; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm4, %ymm8
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
-; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = [0,2,0,6,0,2,0,6]
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm9, %ymm13
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 80(%rdi), %xmm13
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm13[2,3],ymm5[4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
+; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm10, %ymm11
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm7 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm7 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 80(%rdi), %xmm11
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm11[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm4, %ymm4
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm9, %ymm9
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm9[0,1],ymm6[2,3],ymm9[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm10, %ymm10
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm3 = <5,3,u,u>
 ; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm3, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
@@ -1577,7 +1563,7 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm2, %ymm8
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm13[3],ymm5[4,5,6],ymm13[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm3, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm2, %ymm2
@@ -1611,152 +1597,147 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST:       # %bb.0:
 ; AVX2-FAST-NEXT:    subq $264, %rsp # imm = 0x108
 ; AVX2-FAST-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm9
-; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm7
-; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm5
-; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovaps 224(%rdi), %ymm10
+; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm11
+; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm7
+; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm4
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = <0,6,4,u>
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm1, %ymm9
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm4[0,1],ymm5[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm13[3,4,5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0,1,2,3],ymm7[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-FAST-NEXT:    vpermps %ymm13, %ymm0, %ymm14
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm14 = <0,6,4,u>
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm14, %ymm0
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm2[0,1],ymm5[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm12 = ymm11[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm13 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3],ymm3[4,5],ymm7[6,7]
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm6, %ymm13
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm10[4,5],ymm11[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm1, %ymm14
+; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm5[0,1],ymm8[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vmovaps 320(%rdi), %ymm14
+; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm15
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm9[0,1],ymm8[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm13 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 224(%rdi), %ymm15
-; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm15[4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm14, %ymm14
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vmovaps 320(%rdi), %ymm13
-; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm14
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm6, %ymm6
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm6 = <1,7,5,u>
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm6, %ymm10
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm10, %ymm11
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm11[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm6, %ymm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm8[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm0, %ymm3
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm12[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3,4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-FAST-NEXT:    vpermps %ymm13, %ymm6, %ymm12
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm12[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm6, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm13[2,3],ymm4[4,5],ymm13[6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm2 = [2,0,6,4,2,0,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm7, %ymm10
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[0,0,0,0,4,4,4,4]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm7[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm8[2,3],ymm5[4,5],ymm8[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm2, %ymm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm11, %ymm12
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovaps %ymm13, %ymm12
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps %ymm4, %ymm9
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vmovaps %ymm10, %ymm11
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vmovaps %ymm6, %ymm13
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm7, %ymm10
+; AVX2-FAST-NEXT:    vmovaps %ymm7, %ymm6
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm4[4,5],ymm15[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps 272(%rdi), %xmm3
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm4 = <4,2,u,u>
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm4, %ymm8
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
-; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = [0,2,0,6,0,2,0,6]
-; AVX2-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm9, %ymm13
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm7 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
-; AVX2-FAST-NEXT:    vmovaps 80(%rdi), %xmm13
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm13[2,3],ymm5[4,5],ymm13[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
+; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm10, %ymm11
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm12[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm7 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
+; AVX2-FAST-NEXT:    vmovaps 80(%rdi), %xmm11
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm11[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm4, %ymm4
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm9, %ymm9
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm13[0,1],ymm6[2,3],ymm13[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm10, %ymm10
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm3 = <5,3,u,u>
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm3, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
@@ -1765,7 +1746,7 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm2, %ymm8
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm13[3],ymm5[4,5,6],ymm13[7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm3, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm2, %ymm2
@@ -1799,151 +1780,146 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE:       # %bb.0:
 ; AVX2-FAST-PERLANE-NEXT:    subq $264, %rsp # imm = 0x108
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 224(%rdi), %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm14 = <0,6,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm14, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm3[0,1],ymm5[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm12 = ymm11[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm5[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm7[4,5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm6, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm1 = <0,6,4,u>
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm1, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm5[0,1],ymm7[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm5, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm12[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2],ymm13[3,4,5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm13, %ymm0, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm10[4,5],ymm11[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm1, %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm5[0,1],ymm8[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm15[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm15 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 320(%rdi), %ymm14
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm15
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm14[4,5],ymm15[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm9[0,1],ymm8[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 224(%rdi), %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm15[4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm14, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 320(%rdi), %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm6, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm6 = <1,7,5,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm6, %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3,4,5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm10, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm11[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm6, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm8[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm10, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm0, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm12[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm6[3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm6 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm13, %ymm6, %ymm12
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm12[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm6, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm7[2,3],ymm4[4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, %ymm12
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm8[2,3],ymm5[4,5],ymm8[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm4[4,5],ymm15[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 272(%rdi), %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm4 = <4,2,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm4, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm14[0,1],ymm13[2,3],ymm14[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = [0,2,0,6,0,2,0,6]
-; AVX2-FAST-PERLANE-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm9, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm13[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm7 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm7 = ymm12[0,1,2,3],mem[4,5],ymm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rdi), %xmm13
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm13[2,3],ymm5[4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-FAST-PERLANE-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm10, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm11[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm7 = ymm13[0,1,2,3],mem[4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rdi), %xmm11
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm5[0,1],ymm11[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm4, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm9, %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm9[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm9[0,1],ymm6[2,3],ymm9[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm10, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm10[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm3 = <5,3,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm3, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
@@ -1952,7 +1928,7 @@ define void @load_i32_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm2, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm13[3],ymm5[4,5,6],ymm13[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm11[3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm3, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm2, %ymm2
@@ -3236,173 +3212,157 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX2-SLOW-LABEL: load_i32_stride6_vf32:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $1160, %rsp # imm = 0x488
+; AVX2-SLOW-NEXT:    subq $1192, %rsp # imm = 0x4A8
 ; AVX2-SLOW-NEXT:    vmovaps 480(%rdi), %ymm6
 ; AVX2-SLOW-NEXT:    vmovaps 448(%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 416(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %ymm4
 ; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm11
+; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm3
+; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm12 = <0,6,4,u>
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm12, %ymm0
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[0,1],ymm11[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm13 = <0,6,4,u>
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm13, %ymm1
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[0,1],ymm3[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3],ymm2[4,5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm2, %ymm5
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm6[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 416(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm7[0,1],ymm6[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 384(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 384(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm12, %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm8[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm13, %ymm5
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm7[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm8[3,4,5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 512(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 544(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 544(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 288(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm2, %ymm8
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm8[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm0[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 288(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm8[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 224(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm12, %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm13, %ymm1
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm15 = ymm11[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 320(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm2, %ymm8
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 608(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm3, %ymm14
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 576(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 672(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovaps 640(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm5, %ymm13, %ymm8
+; AVX2-SLOW-NEXT:    vmovaps 672(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm1[0,1],ymm0[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm15 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vmovaps %ymm0, %ymm14
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 608(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovaps 640(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 576(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm12, %ymm12
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm14 = ymm13[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm14[3,4,5],ymm8[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 704(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 736(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm3, %ymm0
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm1 = <1,7,5,u>
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5],ymm10[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm10, %ymm9
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2,3,4],ymm8[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm1, %ymm7
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5],ymm7[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm10, %ymm6
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm2, %ymm0
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm5 = mem[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm1, %ymm4
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm0, %ymm8
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm9 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm9, %ymm10
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm0, %ymm6
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm9, %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm11[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm9, %ymm3
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm5, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,3,2,3,5,7,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm12, %ymm10, %ymm1
+; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm9, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm11, %ymm8
-; AVX2-SLOW-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1],ymm11[2,3],ymm7[4,5],ymm11[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,0,0,0,4,4,4,4]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm9[2,3],ymm12[4,5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -3414,8 +3374,9 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -3428,20 +3389,20 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $224, (%rsp), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm13[2,3],ymm9[4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -3449,62 +3410,63 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm8 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm10[1],ymm1[2,3,4],ymm10[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vmovaps %ymm4, %ymm5
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps %ymm12, %ymm13
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovaps %ymm14, %ymm8
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -3516,95 +3478,99 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm11[0,1,2,3],ymm7[4,5],ymm11[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm12 = ymm12[0,1,2,3],mem[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovaps 464(%rdi), %xmm0
+; AVX2-SLOW-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1,2,3],ymm6[4,5],mem[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 464(%rdi), %xmm1
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm2 = <4,2,u,u>
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm11 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm11 = mem[0,1],ymm10[2,3],mem[4,5,6,7]
-; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [0,2,0,6,0,2,0,6]
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm7, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm2, %ymm4
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, %ymm6
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,3],ymm14[4,5,6,7]
+; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm10, %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm9[0,1,2,3],ymm13[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovaps 272(%rdi), %xmm9
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm9[2,3],ymm13[4,5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm10 = ymm8[0,1],mem[2,3],ymm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm7, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm14[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovaps 656(%rdi), %xmm8
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm8[2,3],ymm3[4,5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm2, %ymm14
-; AVX2-SLOW-NEXT:    vmovaps %ymm2, %ymm4
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm7, %ymm15
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm15[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovaps 272(%rdi), %xmm13
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,0,2,4,5,4,6]
+; AVX2-SLOW-NEXT:    vmovaps %ymm6, %ymm1
+; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm6, %ymm6
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm5, %ymm10, %ymm8
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm8[5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovaps 656(%rdi), %xmm6
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm6[2,3],ymm12[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[0,1,0,2,4,5,4,6]
+; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm1, %ymm11
+; AVX2-SLOW-NEXT:    vmovaps %ymm1, %ymm3
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm15 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm10, %ymm11
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4],ymm11[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm9 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm4, %ymm6
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm7, %ymm7
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm0[2,3],ymm9[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,1,0,2,4,5,4,6]
+; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm3, %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm10, %ymm10
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3,4],ymm10[5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = mem[1,1,1,1,5,5,5,5]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm4 = <5,3,u,u>
+; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1],ymm3[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = [0,3,1,7,0,3,1,7]
+; AVX2-SLOW-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm14, %ymm7
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm13[3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm5, %ymm14, %ymm5
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm12[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm5 = ymm5[0,1,2],mem[3],ymm5[4,5,6],mem[7]
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm6 = <5,3,u,u>
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1],ymm5[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = [0,3,1,7,0,3,1,7]
-; AVX2-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm12, %ymm11
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm11[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm11 = ymm13[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3],ymm11[4,5,6],ymm9[7]
-; AVX2-SLOW-NEXT:    vpermps (%rsp), %ymm6, %ymm11 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm12, %ymm4
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3],ymm3[4,5,6],ymm8[7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm8 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm12, %ymm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm15[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm6, %ymm1
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm4, %ymm6
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm15, %ymm14, %ymm6
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3],ymm6[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm4, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm12, %ymm1
+; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm14, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, 96(%rsi)
@@ -3640,183 +3606,167 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, (%r8)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, 96(%r9)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, 32(%r9)
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovaps %ymm1, 64(%r9)
-; AVX2-SLOW-NEXT:    vmovaps %ymm7, (%r9)
+; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%r9)
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovaps %ymm2, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovaps %ymm5, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovaps %ymm5, 96(%rax)
+; AVX2-SLOW-NEXT:    vmovaps %ymm2, 32(%rax)
+; AVX2-SLOW-NEXT:    vmovaps %ymm3, 64(%rax)
 ; AVX2-SLOW-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-SLOW-NEXT:    addq $1160, %rsp # imm = 0x488
+; AVX2-SLOW-NEXT:    addq $1192, %rsp # imm = 0x4A8
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
 ; AVX2-FAST-LABEL: load_i32_stride6_vf32:
 ; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    subq $1192, %rsp # imm = 0x4A8
+; AVX2-FAST-NEXT:    subq $1160, %rsp # imm = 0x488
 ; AVX2-FAST-NEXT:    vmovaps 480(%rdi), %ymm6
 ; AVX2-FAST-NEXT:    vmovaps 448(%rdi), %ymm7
-; AVX2-FAST-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovups %ymm7, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 416(%rdi), %ymm8
+; AVX2-FAST-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm4
 ; AVX2-FAST-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm11
+; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm12 = <0,6,4,u>
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm12, %ymm0
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[0,1],ymm11[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm13 = <0,6,4,u>
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm13, %ymm1
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[0,1],ymm3[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3],ymm2[4,5],ymm4[6,7]
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm2, %ymm5
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm5[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm6[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 416(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm7[0,1],ymm6[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vmovaps 384(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 384(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm12, %ymm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm8[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm13, %ymm5
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm8 = ymm7[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm8[3,4,5],ymm5[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 512(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 544(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm3, %ymm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 544(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 288(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm2, %ymm8
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 288(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm8[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 224(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm12, %ymm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm13, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm15 = ymm11[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 320(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm2, %ymm8
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 608(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm3, %ymm14
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vmovaps 576(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 672(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovaps 640(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm13, %ymm8
+; AVX2-FAST-NEXT:    vmovaps 672(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm15 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vmovaps %ymm0, %ymm14
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 608(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 576(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm12, %ymm12
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
+; AVX2-FAST-NEXT:    vmovaps 640(%rdi), %ymm0
+; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm14 = ymm13[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm14[3,4,5],ymm8[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 704(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 736(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm3, %ymm0
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = <1,7,5,u>
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm10, %ymm9
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm2, %ymm0
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm8 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2,3,4],ymm8[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm1, %ymm7
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5],ymm7[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm10, %ymm6
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm5 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm1, %ymm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm1, %ymm0
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm0, %ymm8
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm9 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm9, %ymm10
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm0, %ymm6
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm9, %ymm4
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm11[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm9, %ymm3
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,3,2,3,5,7,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm9, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovaps %ymm11, %ymm5
-; AVX2-FAST-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm2 = [2,0,6,4,2,0,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm7[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm8[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm14[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -3830,11 +3780,12 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpermilps $224, (%rsp), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -3846,187 +3797,183 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1],ymm13[2,3],ymm4[4,5],ymm13[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm13[2,3],ymm6[4,5],ymm13[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm2, %ymm0
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm14[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm11 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps %ymm14, %ymm8
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm14[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm9, %ymm6
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm13, %ymm9
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm4, %ymm13
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm6[1],ymm1[2,3,4],ymm6[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm3 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm7[0,1,2,3],mem[4,5],ymm7[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,2,3],ymm10[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 464(%rdi), %xmm0
+; AVX2-FAST-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = mem[0,1,2,3],ymm14[4,5],mem[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm11[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovaps 464(%rdi), %xmm1
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm1[2,3],ymm14[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm2 = <4,2,u,u>
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3],ymm12[4,5,6,7]
-; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [0,2,0,6,0,2,0,6]
-; AVX2-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm7, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm2, %ymm4
+; AVX2-FAST-NEXT:    vmovaps %ymm2, %ymm11
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7]
+; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm10, %ymm4
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm8[4,5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm9[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovaps 272(%rdi), %xmm9
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm9[2,3],ymm13[4,5],ymm9[6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm10 = mem[0,1],ymm1[2,3],mem[4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm7, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm13[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovaps 272(%rdi), %xmm13
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,0,2,4,5,4,6]
+; AVX2-FAST-NEXT:    vmovaps %ymm11, %ymm1
+; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm11, %ymm6
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm10, %ymm8
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm8[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm14[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovaps 656(%rdi), %xmm8
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm8[2,3],ymm3[4,5],ymm8[6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm2, %ymm14
-; AVX2-FAST-NEXT:    vmovaps %ymm2, %ymm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm7, %ymm15
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm15[5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm3[4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm12[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovaps 656(%rdi), %xmm6
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm6[2,3],ymm12[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[0,1,0,2,4,5,4,6]
+; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm11, %ymm11
+; AVX2-FAST-NEXT:    vmovaps %ymm1, %ymm3
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm10, %ymm11
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4],ymm11[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm9 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm4, %ymm6
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm7, %ymm7
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm5 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm5 = ymm5[0,1,2],mem[3],ymm5[4,5,6],mem[7]
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm6 = <5,3,u,u>
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1],ymm5[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = [0,3,1,7,0,3,1,7]
-; AVX2-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm12, %ymm11
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm11[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm11 = ymm13[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3],ymm11[4,5,6],ymm9[7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm12, %ymm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3],ymm3[4,5,6],ymm8[7]
-; AVX2-FAST-NEXT:    vpermps (%rsp), %ymm6, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm12, %ymm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm15[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm6, %ymm1
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm0[2,3],ymm9[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,1,0,2,4,5,4,6]
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm3, %ymm4
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm11 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm10, %ymm10
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3,4],ymm10[5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm14[1,1,1,1,5,5,5,5]
+; AVX2-FAST-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm4 = <5,3,u,u>
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1],ymm3[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = [0,3,1,7,0,3,1,7]
+; AVX2-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm14, %ymm7
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm13[3],ymm2[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm14, %ymm5
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm5 = ymm12[1,1,1,1,5,5,5,5]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm4, %ymm6
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm14, %ymm6
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3],ymm6[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm4, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm12, %ymm1
+; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm14, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 96(%rsi)
@@ -4064,187 +4011,171 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 96(%r9)
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 32(%r9)
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovaps %ymm1, 64(%r9)
-; AVX2-FAST-NEXT:    vmovaps %ymm7, (%r9)
+; AVX2-FAST-NEXT:    vmovaps %ymm10, (%r9)
 ; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-NEXT:    vmovaps %ymm2, 96(%rax)
-; AVX2-FAST-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX2-FAST-NEXT:    vmovaps %ymm5, 64(%rax)
+; AVX2-FAST-NEXT:    vmovaps %ymm5, 96(%rax)
+; AVX2-FAST-NEXT:    vmovaps %ymm2, 32(%rax)
+; AVX2-FAST-NEXT:    vmovaps %ymm3, 64(%rax)
 ; AVX2-FAST-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-FAST-NEXT:    addq $1192, %rsp # imm = 0x4A8
+; AVX2-FAST-NEXT:    addq $1160, %rsp # imm = 0x488
 ; AVX2-FAST-NEXT:    vzeroupper
 ; AVX2-FAST-NEXT:    retq
 ;
 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf32:
 ; AVX2-FAST-PERLANE:       # %bb.0:
-; AVX2-FAST-PERLANE-NEXT:    subq $1160, %rsp # imm = 0x488
+; AVX2-FAST-PERLANE-NEXT:    subq $1192, %rsp # imm = 0x4A8
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 480(%rdi), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 448(%rdi), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 416(%rdi), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm12 = <0,6,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm12, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm2[0,1],ymm11[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm3 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm13 = <0,6,4,u>
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm13, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[0,1],ymm3[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3],ymm2[4,5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm2, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[0,1],ymm6[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 416(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm7[0,1],ymm6[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm3[0,1,2,3,4,5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 384(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 384(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm12, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm0[0,1,2,3],ymm8[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm13, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm7[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm8[3,4,5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 512(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 544(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 544(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 288(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm2, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm8[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 288(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm8[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 224(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm12, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm13, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm15 = ymm11[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 320(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm15 = ymm5[0,1,2,3],ymm0[4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm2, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 608(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm3, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm14[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 576(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 672(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 640(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm5, %ymm13, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 672(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm1[0,1],ymm0[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm15 = ymm0[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 608(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 640(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 576(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm15 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm12, %ymm12
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1,2],ymm1[3,4,5],ymm12[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm14 = ymm13[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm14[3,4,5],ymm8[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 704(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 736(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm3, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm1 = <1,7,5,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5],ymm10[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm10 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm10, %ymm9
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm9[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm9[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2,3,4],ymm8[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm1, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm10, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3,4],ymm5[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm1, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm2, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm0, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm9 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm9, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm0, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3,4,5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm9, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm0, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm11[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm9, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm5, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[1,3,2,3,5,7,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm12, %ymm10, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm9, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm11, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1],ymm11[2,3],ymm7[4,5],ymm11[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,0,0,0,4,4,4,4]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm9[2,3],ymm12[4,5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -4256,8 +4187,9 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -4270,20 +4202,20 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, (%rsp), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm13[2,3],ymm9[4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm13[2,3],ymm12[4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -4291,62 +4223,63 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm8 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm10[1],ymm1[2,3,4],ymm10[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm6 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm12, %ymm13
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm14, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm3[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -4358,95 +4291,99 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm11[0,1,2,3],ymm7[4,5],ymm11[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm12 = ymm12[0,1,2,3],mem[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 464(%rdi), %xmm0
+; AVX2-FAST-PERLANE-NEXT:    vblendps $207, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[0,1,2,3],ymm6[4,5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm12[0,1],ymm0[2,3],ymm12[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 464(%rdi), %xmm1
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm2 = <4,2,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm11 = mem[0,1],ymm10[2,3],mem[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [0,2,0,6,0,2,0,6]
-; AVX2-FAST-PERLANE-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm7, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm2, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,3],ymm14[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [0,2,0,6,0,2,0,6]
+; AVX2-FAST-PERLANE-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm10, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm6[0,1,2,3],mem[4,5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm9[0,1,2,3],ymm13[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 272(%rdi), %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm13[0,1],ymm9[2,3],ymm13[4,5],ymm9[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm10 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm10 = ymm8[0,1],mem[2,3],ymm8[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm7, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm4[4,5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm14[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 656(%rdi), %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm8[2,3],ymm3[4,5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm2, %ymm14
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm7, %ymm15
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm15[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm2 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 272(%rdi), %xmm13
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[0,1,0,2,4,5,4,6]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm6, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm6, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm5, %ymm10, %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3,4],ymm8[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm9[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 656(%rdi), %xmm6
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1],ymm6[2,3],ymm12[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[0,1,0,2,4,5,4,6]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm1, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm15 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm10, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4],ymm11[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm15 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm9 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm0[2,3],ymm15[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm14 = ymm14[0,1,0,2,4,5,4,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm4, %ymm6
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm14[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[0,1],ymm4[2,3],mem[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm7, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm6[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm9[0,1],ymm0[2,3],ymm9[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm11 = ymm11[0,1,0,2,4,5,4,6]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm3, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm11 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm11 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm10, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3,4],ymm10[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[1,1,1,1,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm3[0,1,2],mem[3],ymm3[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm4 = <5,3,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm14[0,1],ymm3[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm14 = [0,3,1,7,0,3,1,7]
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm14, %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm7[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[1,1,1,1,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm13[3],ymm2[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm7 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm5, %ymm14, %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm12[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm5[0,1,2],mem[3],ymm5[4,5,6],mem[7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm6 = <5,3,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm12 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm12[0,1],ymm5[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm12 = [0,3,1,7,0,3,1,7]
-; AVX2-FAST-PERLANE-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm12, %ymm11
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm11[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm11 = ymm13[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3],ymm11[4,5,6],ymm9[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps (%rsp), %ymm6, %ymm11 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm12, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3],ymm3[4,5,6],ymm8[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm8 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm8[0,1],ymm3[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm12, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm15[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm6, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm4, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3,4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm15, %ymm14, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm6[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm9[1,1,1,1,5,5,5,5]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3],ymm6[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm4, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm12, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm14, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 96(%rsi)
@@ -4482,17 +4419,17 @@ define void @load_i32_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, (%r8)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 96(%r9)
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 32(%r9)
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm1, 64(%r9)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm7, (%r9)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm10, (%r9)
 ; AVX2-FAST-PERLANE-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, 96(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm5, 64(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm5, 96(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm2, 32(%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm3, 64(%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm0, (%rax)
-; AVX2-FAST-PERLANE-NEXT:    addq $1160, %rsp # imm = 0x488
+; AVX2-FAST-PERLANE-NEXT:    addq $1192, %rsp # imm = 0x4A8
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -7064,52 +7001,48 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-LABEL: load_i32_stride6_vf64:
 ; AVX2-SLOW:       # %bb.0:
 ; AVX2-SLOW-NEXT:    subq $2472, %rsp # imm = 0x9A8
-; AVX2-SLOW-NEXT:    vmovaps 672(%rdi), %ymm3
-; AVX2-SLOW-NEXT:    vmovaps 640(%rdi), %ymm4
+; AVX2-SLOW-NEXT:    vmovaps 672(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vmovaps 640(%rdi), %ymm3
+; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 608(%rdi), %ymm4
 ; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 320(%rdi), %ymm6
+; AVX2-SLOW-NEXT:    vmovaps 320(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm6
 ; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 352(%rdi), %ymm7
+; AVX2-SLOW-NEXT:    vmovaps 288(%rdi), %ymm7
 ; AVX2-SLOW-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 256(%rdi), %ymm8
+; AVX2-SLOW-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 224(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm5 = <0,6,4,u>
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm5, %ymm0
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
+; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vmovaps %ymm8, %ymm13
-; AVX2-SLOW-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm9 = <0,6,4,u>
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm8[0,1],ymm7[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [8589934596,8589934596,8589934596,8589934596]
 ; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm15, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[0,1],ymm3[0,1]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 608(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 576(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[0,1],ymm2[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 576(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 704(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 736(%rdi), %ymm2
@@ -7124,18 +7057,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovaps 1024(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 992(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 992(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovaps 960(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 960(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 1088(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 1120(%rdi), %ymm2
@@ -7150,18 +7082,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovaps 1408(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 1376(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 1344(%rdi), %ymm2
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 1376(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 1344(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 1472(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 1504(%rdi), %ymm2
@@ -7175,17 +7106,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %ymm2
@@ -7194,21 +7124,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm15, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 480(%rdi), %ymm4
+; AVX2-SLOW-NEXT:    vmovaps 480(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 448(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm0[0,1],ymm4[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 416(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 416(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 384(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 384(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 512(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 544(%rdi), %ymm2
@@ -7217,131 +7146,103 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm15, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 864(%rdi), %ymm3
+; AVX2-SLOW-NEXT:    vmovaps 864(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 832(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm3[0,1]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 800(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 768(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 800(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 768(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 896(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 928(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm15, %ymm1
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm15, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 1248(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovaps 1184(%rdi), %ymm0
+; AVX2-SLOW-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 1152(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 1216(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 1184(%rdi), %ymm2
-; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 1152(%rdi), %ymm0
-; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm5, %ymm5
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm1[3,4,5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vmovaps 1280(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm9, %ymm0
+; AVX2-SLOW-NEXT:    vmovaps 1248(%rdi), %ymm9
+; AVX2-SLOW-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 1216(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovaps 1312(%rdi), %ymm5
-; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm9[0,1]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovaps 1280(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovaps 1312(%rdi), %ymm13
+; AVX2-SLOW-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm15, %ymm15
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = mem[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm15 = ymm13[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm15[1],ymm0[2,3,4],ymm15[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm15 = <1,7,5,u>
+; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm15, %ymm0
+; AVX2-SLOW-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm13 = mem[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [12884901893,12884901893,12884901893,12884901893]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm14 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm13 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm0, %ymm11
+; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm15, %ymm11
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[1,3,2,3,5,7,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5],ymm11[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm15, %ymm10
+; AVX2-SLOW-NEXT:    vpermps %ymm10, %ymm0, %ymm10
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
-; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm15, %ymm7
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm7 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4],ymm7[5],ymm6[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5],ymm4[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm15, %ymm6
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm7, %ymm0, %ymm7
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm15, %ymm3
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[1,1,2,3,5,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm15, %ymm2
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpermps %ymm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
@@ -7368,8 +7269,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -7382,22 +7284,22 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1],ymm7[2,3],ymm15[4,5],ymm7[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm5[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -7407,9 +7309,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm8[2,3],mem[4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm7[2,3],mem[4,5],ymm7[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -7424,11 +7326,12 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $224, (%rsp), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm3[2,3],mem[4,5],ymm3[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -7446,19 +7349,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
@@ -7480,20 +7384,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
+; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = ymm14[0,1],mem[2,3],ymm14[4,5],mem[6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
+; AVX2-SLOW-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-SLOW-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -7527,8 +7431,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -7540,18 +7443,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $255, (%rsp), %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -7559,7 +7460,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm13[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
@@ -7573,7 +7474,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $244, (%rsp), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
@@ -7591,39 +7492,42 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vmovaps %ymm14, %ymm13
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,0,1,4,5,4,5]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,0,1,4,5,4,5]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,1,3,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -7633,10 +7537,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-SLOW-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -7649,19 +7553,19 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 272(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm1 = <4,2,u,u>
-; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm2
+; AVX2-SLOW-NEXT:    vpermps %ymm2, %ymm1, %ymm2
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
@@ -7671,17 +7575,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm14, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 464(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
@@ -7691,31 +7595,30 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,2,3],ymm2[4,5],mem[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 656(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 848(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -7726,16 +7629,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $48, (%rsp), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm6 = ymm2[0,1,2,3],mem[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 1040(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -7745,15 +7648,15 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm13, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovups %ymm3, (%rsp) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm10 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 1232(%rdi), %xmm12
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -7770,7 +7673,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm7 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 1424(%rdi), %xmm8
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm1, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -7780,12 +7683,13 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-SLOW-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm4 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 80(%rdi), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,1,0,2,4,5,4,6]
 ; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
@@ -7797,8 +7701,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6],mem[7]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovaps {{.*#+}} xmm1 = <5,3,u,u>
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm15 = ymm15[0,1],ymm0[2,3,4,5,6,7]
@@ -7809,8 +7713,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -7818,8 +7722,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -7827,35 +7731,35 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpermilps $85, (%rsp), %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-SLOW-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm13, %ymm0, %ymm13
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6],ymm12[7]
-; AVX2-SLOW-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpermps (%rsp), %ymm1, %ymm12 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm9, %ymm0, %ymm9
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6],ymm8[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm11, %ymm1, %ymm8
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm6, %ymm0, %ymm6
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6],ymm2[7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpermps %ymm3, %ymm0, %ymm0
@@ -7963,281 +7867,245 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovaps 672(%rdi), %ymm3
 ; AVX2-FAST-NEXT:    vmovaps 640(%rdi), %ymm4
 ; AVX2-FAST-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 320(%rdi), %ymm6
+; AVX2-FAST-NEXT:    vmovaps 608(%rdi), %ymm5
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 320(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm6
 ; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 352(%rdi), %ymm7
+; AVX2-FAST-NEXT:    vmovaps 288(%rdi), %ymm7
 ; AVX2-FAST-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 256(%rdi), %ymm8
+; AVX2-FAST-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 224(%rdi), %ymm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm5 = <0,6,4,u>
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm0, %ymm5, %ymm0
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
+; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vmovaps %ymm8, %ymm13
-; AVX2-FAST-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm10 = <0,6,4,u>
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm10, %ymm0
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm8[0,1],ymm7[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [8589934596,8589934596,8589934596,8589934596]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[0,1],ymm3[0,1]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovaps 608(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [8589934596,8589934596,8589934596,8589934596]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 576(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm4[0,1],ymm3[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm3[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 576(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 704(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 736(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 704(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 736(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1056(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 1056(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 1024(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1024(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 992(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 960(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 1088(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1120(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 1088(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 1120(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1440(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 1440(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 1408(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1408(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 1376(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 1344(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 1472(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1504(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 1472(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 1504(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovaps (%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovaps 480(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 480(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 448(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 448(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 416(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 384(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 512(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 544(%rdi), %ymm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 864(%rdi), %ymm3
-; AVX2-FAST-NEXT:    vmovaps 832(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm3[0,1]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
+; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 512(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 544(%rdi), %ymm3
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 864(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 832(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm2[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 800(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 768(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm5, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovaps 896(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 928(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 896(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovaps 928(%rdi), %ymm3
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1248(%rdi), %ymm1
+; AVX2-FAST-NEXT:    vmovaps 1184(%rdi), %ymm1
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1216(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovaps 1184(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovaps 1152(%rdi), %ymm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1152(%rdi), %ymm0
-; AVX2-FAST-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm1[3,4,5],ymm5[6,7]
-; AVX2-FAST-NEXT:    vmovaps 1280(%rdi), %ymm1
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovaps 1312(%rdi), %ymm5
-; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm15, %ymm15
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm10, %ymm1
+; AVX2-FAST-NEXT:    vmovaps 1248(%rdi), %ymm10
+; AVX2-FAST-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 1216(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm2[0,1],ymm10[0,1]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm2[0,1,2,3,4,5],ymm10[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,2,2,2,4,6,6,6]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vmovaps 1280(%rdi), %ymm2
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovaps 1312(%rdi), %ymm14
+; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm14[0,1,2,3],ymm2[4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm15 = ymm13[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm15[1],ymm0[2,3,4],ymm15[5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = <1,7,5,u>
+; AVX2-FAST-NEXT:    vpermps %ymm15, %ymm1, %ymm0
+; AVX2-FAST-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [12884901893,12884901893,12884901893,12884901893]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = mem[1,3,2,3,5,7,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm14 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm13 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm15 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm12, %ymm1, %ymm12
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm13 = ymm13[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2],ymm13[3,4,5],ymm12[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm0, %ymm11
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5],ymm11[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm10, %ymm15, %ymm10
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm10 = ymm11[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3,4,5],ymm11[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm1, %ymm7
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm9[3,4,5],ymm7[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm7, %ymm15, %ymm7
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm8[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm7 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4],ymm7[5],ymm6[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm0, %ymm4
+; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm1, %ymm4
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[1,3,2,3,5,7,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm5, %ymm0, %ymm5
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm10[1,3,2,3,5,7,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
@@ -8245,10 +8113,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
-; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm0 = [2,0,6,4,2,0,6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm4 = [2,0,6,4,2,0,6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
@@ -8264,9 +8132,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = ymm5[0,1],mem[2,3],ymm5[4,5],mem[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
@@ -8277,52 +8146,53 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[2,3,2,3,6,7,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm6[2,3],ymm8[4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm7[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm14[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm12[2,3],ymm2[4,5],ymm12[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm12[0,1],ymm11[2,3],ymm12[4,5],ymm11[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm13[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5,6],ymm2[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $238, (%rsp), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[2,3,2,3,6,7,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm4[2,3],mem[4,5],ymm4[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
@@ -8338,9 +8208,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm0[2,3],mem[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
@@ -8356,9 +8227,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
-; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm2 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm11[2,3],mem[4,5],ymm11[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm0, %ymm2
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2,3],mem[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm4, %ymm2
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
@@ -8369,21 +8241,21 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm11[2,3],ymm14[4,5],ymm11[6,7]
-; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[2,3,2,3,6,7,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm6[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermps %ymm1, %ymm4, %ymm0
+; AVX2-FAST-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[2,3,2,3,6,7,6,7]
+; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[0,0,2,3,4,4,6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
+; AVX2-FAST-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -8415,8 +8287,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -8428,43 +8299,40 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm13[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm15[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm14[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2,3,4],ymm12[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm13[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps $255, (%rsp), %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
@@ -8481,38 +8349,41 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm11, %ymm12
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovaps %ymm14, %ymm13
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps $255, (%rsp), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,1,3,3,4,5,7,7]
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -8522,10 +8393,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-FAST-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -8538,39 +8409,39 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm2 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm1 = ymm0[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 272(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = <4,2,u,u>
-; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm2
+; AVX2-FAST-NEXT:    vpermps %ymm2, %ymm1, %ymm2
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = mem[0,1],ymm0[2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = ymm0[0,1],mem[2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [0,2,0,6,0,2,0,6]
 ; AVX2-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vpermps %ymm14, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 464(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
@@ -8580,12 +8451,12 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,2,3],ymm2[4,5],mem[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm6 = mem[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 656(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -8596,21 +8467,19 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps $240, (%rsp), %ymm13, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 848(%rdi), %xmm2
-; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = mem[0,1],ymm15[2,3],mem[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
@@ -8620,12 +8489,12 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm6 = ymm2[0,1,2,3],mem[4,5,6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 1040(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -8635,14 +8504,15 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermps %ymm13, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps $48, (%rsp), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm3 = ymm15[0,1,2,3],mem[4,5],ymm15[6,7]
-; AVX2-FAST-NEXT:    vmovups %ymm3, (%rsp) # 32-byte Spill
+; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm10 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps 1232(%rdi), %xmm12
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -8659,7 +8529,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm7 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps 1424(%rdi), %xmm8
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm1, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -8669,12 +8539,13 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm0, %ymm3
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FAST-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm5 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm4 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 80(%rdi), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
@@ -8686,8 +8557,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm0 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6],mem[7]
+; AVX2-FAST-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = <5,3,u,u>
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm15 = ymm15[0,1],ymm0[2,3,4,5,6,7]
@@ -8698,8 +8569,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -8707,8 +8578,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -8716,8 +8587,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-NEXT:    vblendps $8, (%rsp), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -8725,26 +8596,26 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm13, %ymm0, %ymm13
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6],ymm12[7]
-; AVX2-FAST-NEXT:    vpermps (%rsp), %ymm1, %ymm12 # 32-byte Folded Reload
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6,7]
+; AVX2-FAST-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm9, %ymm0, %ymm9
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6],ymm8[7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm11, %ymm1, %ymm8
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm6, %ymm0, %ymm6
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6],ymm2[7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm0, %ymm0
@@ -8849,52 +8720,48 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-LABEL: load_i32_stride6_vf64:
 ; AVX2-FAST-PERLANE:       # %bb.0:
 ; AVX2-FAST-PERLANE-NEXT:    subq $2472, %rsp # imm = 0x9A8
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 672(%rdi), %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 640(%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 672(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 640(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 608(%rdi), %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 320(%rdi), %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 320(%rdi), %ymm5
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 352(%rdi), %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 288(%rdi), %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 288(%rdi), %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 256(%rdi), %ymm8
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 224(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm5 = <0,6,4,u>
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm5, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm8[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm8, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm9 = <0,6,4,u>
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm8[0,1],ymm7[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm7[0,1,2,3],ymm6[4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [8589934596,8589934596,8589934596,8589934596]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm15, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[0,1],ymm3[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 608(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 576(%rdi), %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[0,1],ymm2[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 576(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 704(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 736(%rdi), %ymm2
@@ -8909,18 +8776,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1024(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 992(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 992(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 960(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 960(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1088(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1120(%rdi), %ymm2
@@ -8935,18 +8801,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1408(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1376(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1344(%rdi), %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1376(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1344(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm0, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm2[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1472(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1504(%rdi), %ymm2
@@ -8960,17 +8825,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm2[4,5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %ymm2
@@ -8979,21 +8843,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm15, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 480(%rdi), %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 480(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 448(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm0[0,1],ymm4[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm9[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 416(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 416(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 384(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 384(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 512(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 544(%rdi), %ymm2
@@ -9002,131 +8865,103 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm15, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 864(%rdi), %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 864(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 832(%rdi), %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm0[0,1],ymm3[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 800(%rdi), %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 768(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 800(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 768(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm5, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 896(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 928(%rdi), %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm15, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm15, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1248(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1184(%rdi), %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1152(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1216(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],ymm1[0,1]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,2,4,5,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1184(%rdi), %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1152(%rdi), %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm5, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2],ymm1[3,4,5],ymm5[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1280(%rdi), %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm9, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1248(%rdi), %ymm9
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1216(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovaps 1312(%rdi), %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],ymm9[0,1]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm1[0,1,2,3,4,5],ymm9[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[0,2,2,2,4,6,6,6]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1280(%rdi), %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovaps 1312(%rdi), %ymm13
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm13[0,1,2,3],ymm1[4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm15, %ymm15
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm15 = ymm13[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm15 = ymm0[0],ymm15[1],ymm0[2,3,4],ymm15[5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm0 = <1,7,5,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm15 = <1,7,5,u>
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm15, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm13 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm0 = [12884901893,12884901893,12884901893,12884901893]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd {{.*#+}} ymm15 = [12884901893,12884901893,12884901893,12884901893]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm14 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm13 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm13 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $237, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5],ymm13[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm14[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm13 = ymm14[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3,4],ymm13[5],ymm12[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm0, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm15, %ymm11
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[1,3,2,3,5,7,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3,4,5],ymm11[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm15, %ymm10
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm10, %ymm0, %ymm10
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3,4,5],ymm10[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm9 = ymm9[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm4[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2,3,4],ymm10[5],ymm9[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2],ymm9[3,4,5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm15, %ymm7
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm6 = ymm6[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm7 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4],ymm7[5],ymm6[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2],ymm6[3,4,5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm15, %ymm6
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm8 = ymm8[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3,4,5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm7, %ymm0, %ymm7
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm15, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3,4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm0, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm4[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $229, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[1,1,2,3,5,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm3[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm15, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm9[1,3,2,3,5,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3,4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
@@ -9153,8 +8988,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm5[2,3],mem[4,5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -9167,22 +9003,22 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm15[0,1],ymm7[2,3],ymm15[4,5],ymm7[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm14[0,1],ymm15[2,3],ymm14[4,5],ymm15[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,0,0,0,4,4,4,4]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm5[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -9192,9 +9028,9 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm8[2,3],mem[4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm7[2,3],mem[4,5],ymm7[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -9209,11 +9045,12 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, (%rsp), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm3[2,3],mem[4,5],ymm3[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
@@ -9231,19 +9068,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm14[2,3],mem[4,5],ymm14[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
@@ -9265,20 +9103,20 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,0,2,3,4,4,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[2,3,2,3,6,7,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm14[0,1],mem[2,3],ymm14[4,5],mem[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[2,0,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,0,0,0,4,4,4,4]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $0, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,0,0,0,4,4,4,4]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $224, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm2 = mem[0,0,2,3,4,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
@@ -9312,8 +9150,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -9325,18 +9162,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, (%rsp), %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -9344,7 +9179,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm13[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $34, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4],mem[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
@@ -9358,7 +9193,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $244, (%rsp), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
@@ -9376,39 +9211,42 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm5[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[0,1,3,3,4,5,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovaps %ymm14, %ymm13
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm12[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm3[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm15[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm6[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm5[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3,4],ymm8[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3,4],ymm7[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm11[0,1,0,1,4,5,4,5]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm10[0,1,3,3,4,5,7,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm10[0,1,0,1,4,5,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm9[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
@@ -9418,10 +9256,10 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $244, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,3,3,4,5,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[3,3,3,3,7,7,7,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm1 = ymm4[3,3,3,3,7,7,7,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3,4],ymm5[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,2,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4],ymm0[5,6,7]
@@ -9434,19 +9272,19 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm2 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 272(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm1 = <4,2,u,u>
-; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm2, %ymm1, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm0[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
@@ -9456,17 +9294,17 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm14, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 464(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1],ymm10[2,3],ymm11[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
@@ -9476,31 +9314,30 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1,2,3],ymm2[4,5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm6 = mem[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 656(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm3[0,1],mem[2,3],ymm3[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $243, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm3 = mem[0,1],ymm3[2,3],mem[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm11[0,1,2,3],ymm12[4,5],ymm11[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm13[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 848(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -9511,16 +9348,16 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $48, (%rsp), %ymm2, %ymm3 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm6 = ymm2[0,1,2,3],mem[4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm6, (%rsp) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm2[0,1,2,3],mem[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1040(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -9530,15 +9367,15 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm13, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX2-FAST-PERLANE-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm3, (%rsp) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm10 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm10 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1232(%rdi), %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1],ymm12[2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -9555,7 +9392,7 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm7 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 1424(%rdi), %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm2 = ymm2[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm1, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3,4,5,6,7]
@@ -9565,12 +9402,13 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm0, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $240, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm5 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm5 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps $48, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm4 = ymm2[0,1,2,3],mem[4,5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rdi), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm2[2,3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,1,0,2,4,5,4,6]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3,4,5,6,7]
@@ -9582,8 +9420,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm0 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm0 = ymm0[0,1,2],mem[3],ymm0[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps {{.*#+}} xmm1 = <5,3,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm15 = ymm15[0,1],ymm0[2,3,4,5,6,7]
@@ -9594,8 +9432,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -9603,8 +9441,8 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
@@ -9612,35 +9450,35 @@ define void @load_i32_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, (%rsp), %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm14 = mem[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps $136, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
-; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6],mem[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm14 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    # ymm14 = ymm14[0,1,2],mem[3],ymm14[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm15 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm13, %ymm0, %ymm13
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3,4],ymm13[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm10[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6],ymm12[7]
-; AVX2-FAST-PERLANE-NEXT:    vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2],ymm12[3],ymm10[4,5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpermps (%rsp), %ymm1, %ymm12 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm9, %ymm0, %ymm9
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6],ymm8[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2],ymm8[3],ymm7[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm11, %ymm1, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm6, %ymm0, %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm5 = ymm5[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm4, %ymm1, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermps %ymm3, %ymm0, %ymm0

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
index 27c77459f4696..f4cf3b2188993 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-7.ll
@@ -885,7 +885,7 @@ define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm7
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm11
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm12
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
@@ -988,7 +988,7 @@ define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm7
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6],ymm7[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 8(%rdi), %xmm11
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm12
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
@@ -1091,7 +1091,7 @@ define void @load_i32_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4],ymm6[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 80(%rdi), %xmm7
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm11 = ymm10[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6],ymm7[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3],ymm11[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 8(%rdi), %xmm11
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3]
@@ -1873,7 +1873,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm3 = ymm12[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm14
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2,3]
@@ -1885,7 +1885,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm3 = ymm5[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 232(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm13
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm13[1],xmm3[2,3]
@@ -2099,7 +2099,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm3[8,9,10,11,12,13,14,15],ymm11[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm11[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 8(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
@@ -2111,7 +2111,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 232(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %xmm13
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3]
@@ -2326,7 +2326,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm3 = ymm12[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 8(%rdi), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm14
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2,3]
@@ -2338,7 +2338,7 @@ define void @load_i32_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm3 = ymm5[8,9,10,11,12,13,14,15],ymm15[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm15[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 232(%rdi), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm13[1],xmm3[2,3]
@@ -4198,7 +4198,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm2 = ymm10[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3]
@@ -4212,7 +4212,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 528(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm3 = ymm11[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 456(%rdi), %xmm14
 ; AVX2-SLOW-NEXT:    vmovdqa 480(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm14 = xmm14[0],xmm1[1],xmm14[2,3]
@@ -4226,7 +4226,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 752(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm12 = ymm9[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm3[3],ymm12[4,5,6],ymm3[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm3[3],ymm12[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 680(%rdi), %xmm14
 ; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4242,7 +4242,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu (%rsp), %ymm6 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm13 = ymm5[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3],ymm13[4,5,6],ymm12[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3],ymm13[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 232(%rdi), %xmm13
 ; AVX2-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm14
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3]
@@ -4708,7 +4708,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm10[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 8(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2,3]
@@ -4722,7 +4722,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 528(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm2 = ymm12[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm0[3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 456(%rdi), %xmm15
 ; AVX2-FAST-NEXT:    vmovdqa 480(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
@@ -4736,7 +4736,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 752(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm11 = ymm14[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm14[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm2[3],ymm11[4,5,6],ymm2[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2],ymm2[3],ymm11[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 680(%rdi), %xmm15
 ; AVX2-FAST-NEXT:    vmovdqa 704(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4751,7 +4751,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm14 = ymm9[8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3],ymm14[4,5,6],ymm11[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 232(%rdi), %xmm14
 ; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm14 = xmm14[0],xmm2[1],xmm14[2,3]
@@ -5221,7 +5221,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 80(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm2 = ymm10[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 8(%rdi), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0],xmm2[1],xmm3[2,3]
@@ -5235,7 +5235,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 528(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm3 = ymm11[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3],ymm3[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 456(%rdi), %xmm14
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 480(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm14 = xmm14[0],xmm1[1],xmm14[2,3]
@@ -5249,7 +5249,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 752(%rdi), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm12 = ymm9[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm3[3],ymm12[4,5,6],ymm3[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm3[3],ymm12[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 680(%rdi), %xmm14
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 704(%rdi), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -5265,7 +5265,7 @@ define void @load_i32_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm6 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm13 = ymm5[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3],ymm13[4,5,6],ymm12[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3],ymm13[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 232(%rdi), %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %xmm14
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3]
@@ -8972,7 +8972,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 232(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8988,7 +8988,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 752(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 680(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9004,7 +9004,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 1200(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm9[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 1128(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 1152(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9020,7 +9020,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 1648(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm5[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 1576(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 1600(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9035,7 +9035,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 1424(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm15[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm15[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 1352(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 1376(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
@@ -9048,7 +9048,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 8(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9063,7 +9063,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 456(%rdi), %xmm15
 ; AVX2-SLOW-NEXT:    vmovdqa 480(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9080,7 +9080,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpalignr {{.*#+}} ymm14 = ymm9[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd 904(%rdi), %xmm15
 ; AVX2-SLOW-NEXT:    vmovdqa 928(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
@@ -10015,7 +10015,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 232(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 256(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10030,7 +10030,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 752(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm7[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 680(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 704(%rdi), %xmm5
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3]
@@ -10045,7 +10045,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 1200(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm9[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 1128(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 1152(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10061,7 +10061,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 1648(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm14[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm14[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 1576(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 1600(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10076,7 +10076,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovdqa 1424(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 1352(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 1376(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
@@ -10089,7 +10089,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 8(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
 ; AVX2-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10105,7 +10105,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa 528(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm1 = ymm13[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm13[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 456(%rdi), %xmm15
 ; AVX2-FAST-NEXT:    vmovdqa 480(%rdi), %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10122,7 +10122,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpalignr {{.*#+}} ymm14 = ymm10[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm10[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6],ymm0[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpbroadcastd 904(%rdi), %xmm15
 ; AVX2-FAST-NEXT:    vmovdqa 928(%rdi), %xmm0
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]
@@ -11063,7 +11063,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 304(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm4[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 232(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 256(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11079,7 +11079,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 752(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm11[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm11[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 680(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 704(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11095,7 +11095,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1200(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm9[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 1128(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1152(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11111,7 +11111,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1648(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm5[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 1576(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1600(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11126,7 +11126,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1424(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm15[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm15[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 1352(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 1376(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
@@ -11139,7 +11139,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm1 = ymm8[8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7],ymm8[24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 8(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11154,7 +11154,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 456(%rdi), %xmm15
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 480(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -11171,7 +11171,7 @@ define void @load_i32_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpalignr {{.*#+}} ymm14 = ymm9[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6],ymm0[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm0[3],ymm14[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastd 904(%rdi), %xmm15
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 928(%rdi), %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm15 = xmm15[0],xmm0[1],xmm15[2,3]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 7e72435032a37..533f7c07a1553 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -1939,232 +1939,216 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX2-ONLY-LABEL: load_i8_stride6_vf32:
 ; AVX2-ONLY:       # %bb.0:
+; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %ymm4
 ; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %ymm5
-; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm6
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm1, %ymm2, %ymm7
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm10
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm8, %xmm11
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm0[2,3],ymm4[2,3]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,10],zero,zero,zero,ymm3[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[20,26]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm4[0,1]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,8,14],zero,zero,ymm4[16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[18,24,30],zero,zero
-; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm8, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm11, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm3
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm8 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm1, %ymm2, %ymm9
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = xmm9[0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero,xmm9[u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[2,8,14],zero,zero,xmm10[0,6,12,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm6, %xmm11
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0>
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm0[0,1],ymm3[0,1]
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm3[2,3]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm5, %ymm6, %ymm3
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm11, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero,xmm9[u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[3,9,15],zero,zero,xmm10[1,7,13,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm9, %xmm10, %xmm9
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm9, %ymm3, %ymm3
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm11 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm2, %ymm1, %ymm9
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm10[4,10],zero,zero,zero,xmm10[2,8,14,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm13 = xmm9[2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero,xmm9[u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm12, %xmm13, %xmm12
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm13 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm6, %ymm5, %ymm13
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm12, %ymm14, %ymm12
+; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm14
+; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm14, %ymm4, %ymm8
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,11],zero,zero,zero,xmm10[3,9,15,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero,xmm9[u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpor %xmm10, %xmm9, %xmm9
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,0,6,12],zero,zero,zero,xmm8[4,10],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm9, %ymm13, %ymm13
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12]
 ; AVX2-ONLY-NEXT:    vpor %xmm7, %xmm10, %xmm7
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,ymm3[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[5,11],zero,zero,zero,ymm3[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[21,27]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm11 = ymm4[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15],zero,zero,ymm4[17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[19,25,31],zero,zero
-; AVX2-ONLY-NEXT:    vpor %ymm10, %ymm11, %ymm10
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm7, %ymm10, %ymm7
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm2, %ymm1, %ymm10
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm13, %xmm14, %xmm13
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,ymm3[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,6,12],zero,zero,zero,ymm3[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,22,28]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,10],zero,zero,zero,ymm4[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[20,26],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %ymm14, %ymm15, %ymm14
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm9, %ymm6, %ymm5, %ymm9
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm13, %ymm14, %ymm13
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm11, %xmm10, %xmm10
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,ymm3[5,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,7,13],zero,zero,zero,ymm3[21,27],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[17,23,29]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm14 = ymm4[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[5,11],zero,zero,zero,ymm4[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[21,27],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %ymm11, %ymm14, %ymm11
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm10, %ymm11, %ymm0
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm8
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u],zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12]
-; AVX2-ONLY-NEXT:    vpor %xmm14, %xmm10, %xmm10
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm13, %ymm10, %ymm13
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm5, %ymm6, %ymm10
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u],zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13]
-; AVX2-ONLY-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm12, %ymm7, %ymm7
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,1,7,13],zero,zero,zero,xmm8[5,11],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u],zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13]
+; AVX2-ONLY-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm4, %ymm14, %ymm9
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm0, %ymm8, %ymm9
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm5, %ymm6, %ymm0
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u],zero,zero,zero,xmm5[4,10],zero,zero,zero,xmm5[2,8,14]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = xmm0[u,u,u,u,u,2,8,14],zero,zero,xmm0[0,6,12],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm8, %xmm6
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm2, %ymm1, %ymm1
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm13, %ymm8, %ymm8
+; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm4, %ymm14, %ymm4
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm4, %xmm11
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm13 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm13, %xmm14, %xmm13
+; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm2, %ymm1, %ymm1
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = <0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm6, %ymm5, %ymm5
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10,u,u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm12 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vpor %xmm8, %xmm12, %xmm8
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm12 = ymm4[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,6,12],zero,zero,zero,ymm4[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,22,28],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,ymm3[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,8,14],zero,zero,ymm3[16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[18,24,30]
-; AVX2-ONLY-NEXT:    vpor %ymm12, %ymm14, %ymm12
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm12[5,6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm8, %ymm6, %ymm6
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,zero,xmm5[5,11],zero,zero,zero,xmm5[3,9,15]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,3,9,15],zero,zero,xmm0[1,7,13],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm12, %xmm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm12[4,5,6,7]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm12
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm6, %ymm12, %ymm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u],zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm11, %xmm4
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,7,13],zero,zero,zero,xmm2[5,11,u,u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[5,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,7,13],zero,zero,zero,ymm4[21,27],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[17,23,29],zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[3,9,15],zero,zero,ymm3[17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[19,25,31]
-; AVX2-ONLY-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm10, %xmm2
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm1, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u,u],zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm3 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm2
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm1, %ymm4, %ymm1
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u],zero,zero,xmm2[0,6,12],zero,zero,zero,xmm2[4,10]
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0,1,2],ymm4[3,4,5,6,7],ymm0[8,9,10],ymm4[11,12,13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,xmm2[1,7,13],zero,zero,zero,xmm2[5,11]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm3 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
-; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero
+; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm4, %xmm2
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7],ymm7[8,9,10],ymm2[11,12,13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-ONLY-NEXT:    vmovdqa %ymm1, (%rsi)
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-ONLY-NEXT:    vmovdqa %ymm0, (%rsi)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm2, (%rdx)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm13, (%rcx)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm9, (%r8)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm7, (%rcx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm8, (%r8)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm6, (%r9)
 ; AVX2-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-ONLY-NEXT:    vmovdqa %ymm0, (%rax)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm1, (%rax)
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
 ; AVX512F-LABEL: load_i8_stride6_vf32:
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm17
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm1
 ; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm6
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],mem[2,3]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[4,10],zero,zero,zero,ymm0[18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm7
+; AVX512F-NEXT:    vpternlogq $202, %ymm3, %ymm17, %ymm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm1[2,3],mem[2,3]
 ; AVX512F-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm4 = ymm1[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero,ymm1[16,22,28],zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %ymm2, %ymm4, %ymm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-NEXT:    vmovdqa %ymm2, %ymm7
-; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm3, %ymm7
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm9
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm8, %xmm10, %xmm10
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, %ymm4, %ymm11, %ymm10
-; AVX512F-NEXT:    vmovdqa 160(%rdi), %ymm8
-; AVX512F-NEXT:    vmovdqa %ymm2, %ymm12
-; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm8, %ymm12
-; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u],zero,zero,xmm13[0,6,12],zero,zero,zero,xmm13[4,10]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm14 = xmm12[u,u,u,u,u,u,4,10],zero,zero,zero,xmm12[2,8,14],zero,zero
-; AVX512F-NEXT:    vpor %xmm4, %xmm14, %xmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm1, %ymm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $248, %ymm16, %ymm4, %ymm11
+; AVX512F-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm14
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm13, %ymm14
+; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[0,6,12],zero,zero,zero,xmm15[4,10]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,4,10],zero,zero,zero,xmm14[2,8,14],zero,zero
+; AVX512F-NEXT:    vpor %xmm4, %xmm12, %xmm4
 ; AVX512F-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3,4,5,6,7],ymm10[8,9,10],ymm4[11,12,13,14,15]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %ymm4, %ymm17
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm0[5,11],zero,zero,zero,ymm0[19,25,31,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm14 = ymm1[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero,ymm1[17,23,29],zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %ymm10, %ymm14, %ymm10
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7],ymm11[8,9,10],ymm4[11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %ymm2, %ymm18
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm7, %xmm9, %xmm7
-; AVX512F-NEXT:    vpternlogq $226, %ymm10, %ymm11, %ymm7
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,u,u],zero,zero,xmm13[1,7,13],zero,zero,zero,xmm13[5,11]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,u,5,11],zero,zero,zero,xmm12[3,9,15],zero,zero
-; AVX512F-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512F-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-NEXT:    vpblendw {{.*#+}} ymm9 = ymm7[0,1,2],ymm9[3,4,5,6,7],ymm7[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[0,6,12],zero,zero,zero,ymm0[20,26,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero,ymm1[18,24,30],zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %ymm9, %ymm10, %ymm10
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-NEXT:    vmovdqa %ymm9, %ymm11
-; AVX512F-NEXT:    vpternlogq $202, %ymm3, %ymm5, %ymm11
-; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm12[4,10],zero,zero,zero,xmm12[2,8,14,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm14 = xmm11[2,8,14],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm13, %xmm14, %xmm13
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $248, %ymm16, %ymm7, %ymm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm15[u,u,u,u,u,u],zero,zero,xmm15[1,7,13],zero,zero,zero,xmm15[5,11]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,u,5,11],zero,zero,zero,xmm14[3,9,15],zero,zero
+; AVX512F-NEXT:    vpor %xmm7, %xmm10, %xmm7
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm8, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm17, %ymm3, %ymm10
+; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm12, %xmm14, %xmm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm5, %ymm9
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, %ymm10, %ymm16, %ymm13
-; AVX512F-NEXT:    vmovdqa %ymm2, %ymm10
-; AVX512F-NEXT:    vpternlogq $202, %ymm8, %ymm6, %ymm10
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm15 = xmm10[u,u,u,u,u,0,6,12],zero,zero,zero,xmm10[4,10],zero,zero,zero
-; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm14
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
+; AVX512F-NEXT:    vpternlogq $248, %ymm16, %ymm12, %ymm14
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm13, %ymm6, %ymm12
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[0,6,12]
 ; AVX512F-NEXT:    vpor %xmm4, %xmm15, %xmm4
 ; AVX512F-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $184, %ymm13, %ymm15, %ymm4
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[1,7,13],zero,zero,zero,ymm0[21,27,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero,ymm1[19,25,31],zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %ymm7, %ymm13, %ymm7
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm12[5,11],zero,zero,zero,xmm12[3,9,15,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[3,9,15],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm12, %xmm11, %xmm11
-; AVX512F-NEXT:    vpternlogq $226, %ymm7, %ymm16, %ymm11
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm10[u,u,u,u,u,1,7,13],zero,zero,zero,xmm10[5,11],zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
-; AVX512F-NEXT:    vpor %xmm7, %xmm10, %xmm7
-; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-NEXT:    vpternlogq $184, %ymm11, %ymm15, %ymm7
-; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm8, %ymm9
-; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm6
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,2,8,14],zero,zero,xmm9[0,6,12],zero,zero,zero
-; AVX512F-NEXT:    vpor %xmm8, %xmm10, %xmm8
-; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-NEXT:    vpternlogq $202, %ymm3, %ymm5, %ymm2
-; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm2[4,10],zero,zero,zero,xmm2[2,8,14],zero,zero,xmm2[u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm5, %xmm10, %xmm5
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[0,6,12],zero,zero,zero,ymm1[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,22,28],zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,ymm0[0,6,12,u,u,u,u,u],zero,zero,zero,ymm0[2,8,14],zero,zero,ymm0[16,22,28,u,u,u,u,u],zero,zero,zero,ymm0[18,24,30]
-; AVX512F-NEXT:    vpor %ymm10, %ymm11, %ymm10
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm10[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-NEXT:    vpternlogq $226, %ymm8, %ymm15, %ymm5
+; AVX512F-NEXT:    vpternlogq $184, %ymm14, %ymm15, %ymm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm11, %xmm10, %xmm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpternlogq $248, %ymm16, %ymm10, %ymm9
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[1,7,13]
+; AVX512F-NEXT:    vpor %xmm2, %xmm10, %xmm2
+; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpternlogq $184, %ymm9, %ymm15, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm13, %ymm8
+; AVX512F-NEXT:    vextracti128 $1, %ymm8, %xmm6
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm8[u,u,u,u,u,2,8,14],zero,zero,xmm8[0,6,12],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm9, %xmm10, %xmm9
+; AVX512F-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-NEXT:    vpternlogq $202, %ymm17, %ymm3, %ymm0
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm3[0,6,12],zero,zero,zero,xmm3[4,10,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $226, %ymm9, %ymm15, %ymm5
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,3,9,15],zero,zero,xmm9[1,7,13],zero,zero,zero
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,3,9,15],zero,zero,xmm8[1,7,13],zero,zero,zero
 ; AVX512F-NEXT:    vpor %xmm6, %xmm8, %xmm6
 ; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[1,7,13],zero,zero,zero,xmm3[5,11,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[5,11],zero,zero,zero,xmm2[3,9,15],zero,zero,xmm2[u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[5,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[1,7,13],zero,zero,zero,ymm1[21,27],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[17,23,29],zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[1,7,13,u,u,u,u,u],zero,zero,zero,ymm0[3,9,15],zero,zero,ymm0[17,23,29,u,u,u,u,u],zero,zero,zero,ymm0[19,25,31]
-; AVX512F-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm0[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-NEXT:    vpternlogq $226, %ymm6, %ymm15, %ymm0
-; AVX512F-NEXT:    vmovdqa64 %ymm17, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %ymm18, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %ymm18, (%rsi)
+; AVX512F-NEXT:    vmovdqa %ymm7, (%rdx)
 ; AVX512F-NEXT:    vmovdqa %ymm4, (%rcx)
-; AVX512F-NEXT:    vmovdqa %ymm7, (%r8)
+; AVX512F-NEXT:    vmovdqa %ymm2, (%r8)
 ; AVX512F-NEXT:    vmovdqa %ymm5, (%r9)
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-NEXT:    vzeroupper
@@ -2173,115 +2157,106 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-LABEL: load_i8_stride6_vf32:
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm0
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[2,8,14],zero,zero,ymm0[16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[18,24,30],zero,zero
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm2[2,3],mem[2,3]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm2[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,10],zero,zero,zero,ymm2[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[20,26]
-; AVX512BW-NEXT:    vpor %ymm4, %ymm5, %ymm4
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm8
+; AVX512BW-NEXT:    movw $-28124, %r10w # imm = 0x9224
+; AVX512BW-NEXT:    kmovd %r10d, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
 ; AVX512BW-NEXT:    movw $18724, %r10w # imm = 0x4924
 ; AVX512BW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm6, %ymm7 {%k1}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm8[2,8,14],zero,zero,xmm8[0,6,12,u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm5, %xmm5
 ; AVX512BW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-NEXT:    vmovdqu8 %ymm4, %ymm5 {%k2}
-; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm4
-; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm4, %ymm9 {%k1}
-; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm10[u,u,u,u,u,u],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[4,10]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm9[u,u,u,u,u,u,4,10],zero,zero,zero,xmm9[2,8,14],zero,zero
-; AVX512BW-NEXT:    vpor %xmm11, %xmm12, %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm5[0,1,2],ymm11[3,4,5,6,7],ymm5[8,9,10],ymm11[11,12,13,14,15]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[3,9,15],zero,zero,ymm0[17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[19,25,31],zero,zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,ymm2[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[5,11],zero,zero,zero,ymm2[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[21,27]
-; AVX512BW-NEXT:    vpor %ymm11, %ymm12, %ymm11
+; AVX512BW-NEXT:    kmovd %r10d, %k3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm3
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
+; AVX512BW-NEXT:    vpor %xmm12, %xmm13, %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[3,9,15],zero,zero,xmm8[1,7,13,u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-NEXT:    vmovdqu8 %ymm11, %ymm7 {%k2}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm10[u,u,u,u,u,u],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[5,11]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,5,11],zero,zero,zero,xmm9[3,9,15],zero,zero
-; AVX512BW-NEXT:    vpor %xmm8, %xmm9, %xmm8
-; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,10],zero,zero,zero,ymm0[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[20,26],zero,zero,zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,ymm2[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,6,12],zero,zero,zero,ymm2[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[16,22,28]
-; AVX512BW-NEXT:    vpor %ymm8, %ymm9, %ymm9
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm9, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
+; AVX512BW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
 ; AVX512BW-NEXT:    movw $9362, %di # imm = 0x2492
 ; AVX512BW-NEXT:    kmovd %edi, %k3
-; AVX512BW-NEXT:    vpblendmw %ymm6, %ymm1, %ymm10 {%k3}
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm8, %xmm12, %xmm8
+; AVX512BW-NEXT:    vpor %xmm7, %xmm12, %xmm7
 ; AVX512BW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
 ; AVX512BW-NEXT:    kmovd %edi, %k4
-; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k4}
-; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm3, %ymm9 {%k1}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm9[u,u,u,u,u,0,6,12],zero,zero,zero,xmm9[4,10],zero,zero,zero
-; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm13
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,u],zero,zero,zero,xmm13[2,8,14],zero,zero,xmm13[0,6,12]
-; AVX512BW-NEXT:    vpor %xmm12, %xmm14, %xmm12
-; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
+; AVX512BW-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
+; AVX512BW-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512BW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512BW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
 ; AVX512BW-NEXT:    kmovd %edi, %k2
-; AVX512BW-NEXT:    vmovdqu8 %ymm12, %ymm8 {%k2}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,11],zero,zero,zero,ymm0[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[21,27],zero,zero,zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,ymm2[5,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,7,13],zero,zero,zero,ymm2[21,27],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[17,23,29]
-; AVX512BW-NEXT:    vpor %ymm12, %ymm14, %ymm12
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqu8 %ymm13, %ymm7 {%k2}
+; AVX512BW-NEXT:    movw $9289, %di # imm = 0x2449
+; AVX512BW-NEXT:    kmovd %edi, %k5
+; AVX512BW-NEXT:    vmovdqu16 %ymm8, %ymm1 {%k5}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm11, %xmm10, %xmm10
-; AVX512BW-NEXT:    vmovdqu8 %ymm12, %ymm10 {%k4}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,1,7,13],zero,zero,zero,xmm9[5,11],zero,zero,zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u],zero,zero,zero,xmm13[3,9,15],zero,zero,xmm13[1,7,13]
-; AVX512BW-NEXT:    vpor %xmm9, %xmm11, %xmm9
+; AVX512BW-NEXT:    vpor %xmm8, %xmm10, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
+; AVX512BW-NEXT:    vpor %xmm9, %xmm10, %xmm9
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm10 {%k2}
-; AVX512BW-NEXT:    vmovdqu16 %ymm6, %ymm1 {%k1}
-; AVX512BW-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm6[0,6,12],zero,zero,zero,xmm6[4,10,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm9, %xmm11, %xmm9
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,ymm2[0,6,12],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,8,14],zero,zero,ymm2[16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[18,24,30]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,6,12],zero,zero,zero,ymm0[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,22,28],zero,zero,zero
-; AVX512BW-NEXT:    vpor %ymm11, %ymm12, %ymm11
-; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm11[5,6,7]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu16 %ymm3, %ymm4 {%k3}
-; AVX512BW-NEXT:    vextracti128 $1, %ymm4, %xmm3
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u],zero,zero,zero,xmm3[4,10],zero,zero,zero,xmm3[2,8,14]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
-; AVX512BW-NEXT:    vpor %xmm11, %xmm12, %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-NEXT:    vmovdqu8 %ymm11, %ymm9 {%k2}
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[1,7,13],zero,zero,zero,xmm6[5,11,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpor %xmm6, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[1,7,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,9,15],zero,zero,ymm2[17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,25,31]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[5,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[1,7,13],zero,zero,zero,ymm0[21,27],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[17,23,29],zero,zero,zero
-; AVX512BW-NEXT:    vpor %ymm2, %ymm0, %ymm0
-; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm3[u,u,u,u,u],zero,zero,zero,xmm3[5,11],zero,zero,zero,xmm3[3,9,15]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
+; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqu16 %ymm4, %ymm0 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm2
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512BW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
 ; AVX512BW-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
 ; AVX512BW-NEXT:    vmovdqa %ymm5, (%rsi)
-; AVX512BW-NEXT:    vmovdqa %ymm7, (%rdx)
-; AVX512BW-NEXT:    vmovdqa %ymm8, (%rcx)
-; AVX512BW-NEXT:    vmovdqa %ymm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa %ymm6, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %ymm7, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %ymm8, (%r8)
 ; AVX512BW-NEXT:    vmovdqa %ymm9, (%r9)
 ; AVX512BW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
@@ -4202,599 +4177,540 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX2-ONLY-LABEL: load_i8_stride6_vf64:
 ; AVX2-ONLY:       # %bb.0:
-; AVX2-ONLY-NEXT:    subq $488, %rsp # imm = 0x1E8
-; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm12
-; AVX2-ONLY-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa 224(%rdi), %ymm15
-; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX2-ONLY-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    subq $360, %rsp # imm = 0x168
+; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %ymm15
+; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm5
+; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,2,8,14,128,128,128,128,128,128,128,128,4,10,128,128,128,18,24,30,128,128,128,128,128,128,128,128,20,26]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm5, %ymm3
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,6,12,128,128,128,128,128,128,128,128,2,8,14,128,128,16,22,28,128,128,128,128,128,128,128,128,18,24,30,128,128]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm8 = ymm0[0,1],ymm1[0,1]
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm8, %ymm0
-; AVX2-ONLY-NEXT:    vpor %ymm3, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0>
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm0[0,1],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpblendvb %ymm6, %ymm12, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm0, %ymm4, %ymm6, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm2, %xmm6
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm3, %xmm13
-; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm13, %xmm6
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm11, %ymm6, %ymm1, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqa %ymm11, %ymm13
+; AVX2-ONLY-NEXT:    vpblendvb %ymm0, %ymm5, %ymm7, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa %ymm0, %ymm8
+; AVX2-ONLY-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm1, %xmm11
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm2, %xmm13
+; AVX2-ONLY-NEXT:    vpor %xmm11, %xmm13, %xmm11
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm11, %ymm9, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm12, %ymm15, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqa %ymm15, %ymm11
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
-; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm7, %xmm15
-; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm7
+; AVX2-ONLY-NEXT:    vmovdqa 224(%rdi), %ymm9
+; AVX2-ONLY-NEXT:    vpblendvb %ymm8, %ymm15, %ymm9, %ymm14
+; AVX2-ONLY-NEXT:    vmovdqa %ymm15, %ymm8
+; AVX2-ONLY-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm14, %xmm10
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm15, %xmm3
+; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm10, %xmm3
+; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm10
 ; AVX2-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm0[2,3],ymm7[2,3]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm0[0,1],ymm7[0,1]
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm10, %ymm0
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm7, %ymm9
-; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm9, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm15, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm0[0,1],ymm10[0,1]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm0[2,3],ymm10[2,3]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm6, %ymm11, %ymm10, %ymm6
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm3, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,3,9,15,128,128,128,128,128,128,128,128,5,11,128,128,128,19,25,31,128,128,128,128,128,128,128,128,21,27]
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm5, %ymm9
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = [1,7,13,128,128,128,128,128,128,128,128,3,9,15,128,128,17,23,29,128,128,128,128,128,128,128,128,19,25,31,128,128]
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm8, %ymm15
-; AVX2-ONLY-NEXT:    vpor %ymm9, %ymm15, %ymm9
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm15 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm0 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm2, %ymm9, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
-; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %ymm4, %ymm10, %ymm1
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm7, %ymm2
-; AVX2-ONLY-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm0 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,1,7,13,3,9,15,u,u,u,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm1, %ymm4, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm15, %xmm1
+; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
 ; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,4,10,128,128,128,128,128,128,128,128,0,6,12,128,128,128,20,26,128,128,128,128,128,128,128,128,16,22,28]
-; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm5, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,8,14,128,128,128,128,128,128,128,128,4,10,128,128,128,18,24,30,128,128,128,128,128,128,128,128,20,26,128,128,128]
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm8, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm1, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendvb %ymm4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm15 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm7, %ymm5, %ymm0
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm1, %xmm3
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm0, %xmm12
-; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm12, %xmm3
-; AVX2-ONLY-NEXT:    vmovdqa %ymm13, %ymm12
-; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm3, %ymm2, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendvb %ymm4, %ymm11, %ymm13, %ymm2
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm3, %xmm4
-; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
-; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm9, %xmm4
-; AVX2-ONLY-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %ymm15, %ymm10, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm7, %ymm9
-; AVX2-ONLY-NEXT:    vpor %ymm6, %ymm9, %ymm6
-; AVX2-ONLY-NEXT:    vpblendvb %ymm12, %ymm4, %ymm6, %ymm4
-; AVX2-ONLY-NEXT:    vmovdqa %ymm12, %ymm15
-; AVX2-ONLY-NEXT:    vmovdqu %ymm4, (%rsp) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29]
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm5, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = [3,9,15,128,128,128,128,128,128,128,128,5,11,128,128,128,19,25,31,128,128,128,128,128,128,128,128,21,27,128,128,128]
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm8, %ymm12
-; AVX2-ONLY-NEXT:    vpor %ymm6, %ymm12, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm0, %xmm3
+; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm3, %xmm3
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,2,8,14,4,10,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm4 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0>
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendvb %ymm4, %ymm11, %ymm12, %ymm2
+; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm2, %ymm6
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm3, %ymm6, %ymm3
+; AVX2-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm9, %ymm8, %ymm3
+; AVX2-ONLY-NEXT:    vmovdqa %ymm9, %ymm12
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm3, %xmm6
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm3, %xmm5
+; AVX2-ONLY-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendvb %ymm4, %ymm10, %ymm9, %ymm4
+; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm4, %ymm7
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm5, %ymm7, %ymm14
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
 ; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm0, %ymm6, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,3,9,15,5,11,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm0, %ymm2, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm6, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm3, %xmm2
+; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX2-ONLY-NEXT:    vpblendvb %ymm13, %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm3, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
-; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm7, %ymm2
-; AVX2-ONLY-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm0, %ymm1, %ymm14
 ; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm0, %ymm2, %ymm1
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm6 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm7 = <255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255,0,0,u,u,255,255>
-; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm0, %ymm2, %ymm8
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm1 = <255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255>
+; AVX2-ONLY-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm3
+; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm0, %ymm2, %ymm8
 ; AVX2-ONLY-NEXT:    vmovdqa 352(%rdi), %ymm4
 ; AVX2-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm5
-; AVX2-ONLY-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm4, %ymm5, %ymm12
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm4, %ymm5, %ymm2
-; AVX2-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpblendvb %ymm1, %ymm5, %ymm4, %ymm1
+; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm4, %ymm5, %ymm15
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm4, %ymm5, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendvb %ymm7, %ymm12, %ymm0, %ymm7
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,0,255,255,u,u,0,0,255,255,u,u,0,0,255,255,255,255,u,u,0,0,255,255,u,u,0,0,255,255,u,u>
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm9 # 32-byte Folded Reload
-; AVX2-ONLY-NEXT:    vpblendvb %ymm3, %ymm11, %ymm13, %ymm11
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX2-ONLY-NEXT:    vpblendvb %ymm0, %ymm11, %ymm2, %ymm13
+; AVX2-ONLY-NEXT:    vpblendvb %ymm0, %ymm10, %ymm9, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm3, %xmm4
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm3, %xmm0
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm5, %xmm10
+; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm10
 ; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm10, %xmm4
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm1, %xmm4
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm2, %xmm2
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, (%rsp), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX2-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm14, %ymm2, %ymm2
+; AVX2-ONLY-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
-; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm5, %xmm1
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-ONLY-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm1, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm2, %xmm6, %xmm1
+; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm14, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa %ymm8, %ymm11
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm8, %xmm2
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm8, %xmm14
-; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm14, %xmm4
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm14
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm14, %xmm0
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm9, %xmm8
-; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm8, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,6,12],zero,zero,zero,ymm10[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm10[16,22,28],zero,zero,zero
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,0,6,12,128,128,128,128,128,128,128,128,2,8,14,128,128,16,22,28,128,128,128,128,128,128,128,128,18,24,30]
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm8, %ymm3, %ymm13
-; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm13, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
+; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm2, %xmm3
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm11, %xmm6
+; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm6, %xmm3
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm6, %xmm0
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm5, %xmm14
+; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm14, %xmm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,4,10,0,6,12,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm13, %ymm0
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm12, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u],zero,zero,zero,xmm0[4,10],zero,zero,zero,xmm0[2,8,14]
-; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm12, %xmm7
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm7, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm12, %xmm5
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm11, %xmm6
-; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm6, %xmm5
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm6 = ymm11[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[0,6,12],zero,zero,zero,ymm11[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[16,22,28],zero,zero,zero
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %ymm8, %ymm7, %ymm4
-; AVX2-ONLY-NEXT:    vpor %ymm6, %ymm4, %ymm4
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm4[5,6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm15, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm15, %xmm9
+; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm9, %xmm1
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm7, %xmm4
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm3, %ymm8
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm8[5,6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm4, %ymm1, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX2-ONLY-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm4, %xmm6
-; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm6, %xmm2
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm14, %xmm8
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm9, %xmm9
-; AVX2-ONLY-NEXT:    vpor %xmm8, %xmm9, %xmm8
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29,128,128,128]
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm10, %ymm13
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,1,7,13,128,128,128,128,128,128,128,128,3,9,15,128,128,17,23,29,128,128,128,128,128,128,128,128,19,25,31]
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm3, %ymm3
-; AVX2-ONLY-NEXT:    vpor %ymm3, %ymm13, %ymm3
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3,4],xmm3[5,6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm4, %ymm1, %ymm1
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm11, %xmm12
+; AVX2-ONLY-NEXT:    vpor %xmm2, %xmm12, %xmm2
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm14 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %xmm14, %xmm5, %xmm5
+; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,5,11,1,7,13,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm13, %ymm11
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm11[5,6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm11[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm3, %ymm2, %ymm2
-; AVX2-ONLY-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm12, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX2-ONLY-NEXT:    vpshufb %ymm9, %ymm11, %ymm3
-; AVX2-ONLY-NEXT:    vpshufb %ymm14, %ymm7, %ymm5
-; AVX2-ONLY-NEXT:    vpor %ymm3, %ymm5, %ymm3
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm5, %ymm2, %ymm2
+; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm15, %xmm4
+; AVX2-ONLY-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX2-ONLY-NEXT:    vpshufb %xmm12, %xmm9, %xmm4
+; AVX2-ONLY-NEXT:    vpshufb %xmm14, %xmm7, %xmm5
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX2-ONLY-NEXT:    vpshufb %ymm6, %ymm3, %ymm5
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vpblendvb %ymm15, %ymm1, %ymm0, %ymm0
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm4, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm1, %xmm5
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm4, %xmm7
+; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm4, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm4
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm4, %xmm6
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
+; AVX2-ONLY-NEXT:    vmovdqa %ymm9, %ymm10
+; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm9, %xmm8
+; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm9, %xmm7
+; AVX2-ONLY-NEXT:    vmovdqa %ymm9, %ymm11
 ; AVX2-ONLY-NEXT:    vpor %xmm5, %xmm7, %xmm5
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7],ymm7[8,9,10],ymm5[11,12,13,14,15]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm10, %xmm10
+; AVX2-ONLY-NEXT:    vpor %xmm4, %xmm10, %xmm4
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0,1,2],ymm4[3,4,5,6,7],ymm10[8,9,10],ymm4[11,12,13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-ONLY-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
+; AVX2-ONLY-NEXT:    vpshufb %xmm9, %xmm11, %xmm8
+; AVX2-ONLY-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm8, %xmm7
-; AVX2-ONLY-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm8, %xmm6
-; AVX2-ONLY-NEXT:    vmovdqa %ymm8, %ymm10
-; AVX2-ONLY-NEXT:    vpor %xmm3, %xmm6, %xmm3
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3,4,5,6,7],ymm6[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
-; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm4, %xmm9
-; AVX2-ONLY-NEXT:    vpor %xmm1, %xmm9, %xmm1
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0,1,2],ymm1[3,4,5,6,7],ymm9[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-ONLY-NEXT:    vpshufb %xmm6, %xmm7, %xmm6
-; AVX2-ONLY-NEXT:    vpshufb %xmm8, %xmm10, %xmm7
-; AVX2-ONLY-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-ONLY-NEXT:    vmovdqa %ymm3, 32(%rsi)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm5, (%rsi)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm6, 32(%rdx)
-; AVX2-ONLY-NEXT:    vmovdqa %ymm1, (%rdx)
-; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vmovaps %ymm1, 32(%rcx)
-; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vmovaps %ymm1, (%rcx)
-; AVX2-ONLY-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vmovaps %ymm1, 32(%r8)
-; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vmovaps %ymm1, (%r8)
-; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX2-ONLY-NEXT:    vmovaps %ymm1, 32(%r9)
+; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0,1,2],ymm7[3,4,5,6,7],ymm8[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX2-ONLY-NEXT:    vmovdqa %ymm5, 32(%rsi)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm6, (%rsi)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm7, 32(%rdx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm4, (%rdx)
+; AVX2-ONLY-NEXT:    vmovups (%rsp), %ymm4 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vmovaps %ymm4, 32(%rcx)
+; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vmovaps %ymm4, (%rcx)
+; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vmovaps %ymm4, 32(%r8)
+; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-ONLY-NEXT:    vmovaps %ymm4, (%r8)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm1, 32(%r9)
 ; AVX2-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vmovaps %ymm1, (%r9)
 ; AVX2-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm0, 32(%rax)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm2, (%rax)
-; AVX2-ONLY-NEXT:    addq $488, %rsp # imm = 0x1E8
+; AVX2-ONLY-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
 ; AVX512F-LABEL: load_i8_stride6_vf64:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-NEXT:    vmovdqa64 224(%rdi), %ymm19
-; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm21
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm0
-; AVX512F-NEXT:    vpternlogq $202, %ymm19, %ymm21, %ymm0
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm0, %xmm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
+; AVX512F-NEXT:    subq $136, %rsp
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa64 224(%rdi), %ymm29
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm31
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm29, %ymm31, %ymm10
+; AVX512F-NEXT:    vpshufb %xmm0, %xmm10, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
+; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm11, %xmm3
+; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm28
+; AVX512F-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm27
+; AVX512F-NEXT:    vmovdqa64 160(%rdi), %ymm20
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm27, %ymm20, %ymm12
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm13
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
+; AVX512F-NEXT:    vpshufb %xmm4, %xmm12, %xmm9
+; AVX512F-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
+; AVX512F-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512F-NEXT:    vpternlogq $202, %ymm30, %ymm28, %ymm14
+; AVX512F-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
+; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm15, %xmm1
+; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm5
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm24
+; AVX512F-NEXT:    vmovdqa64 352(%rdi), %ymm23
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm10, %xmm1, %xmm4
-; AVX512F-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm1, %xmm3
+; AVX512F-NEXT:    vpshufb %xmm4, %xmm0, %xmm4
+; AVX512F-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vinserti32x4 $1, %xmm3, %ymm0, %ymm17
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm11, %xmm11
+; AVX512F-NEXT:    vpor %xmm4, %xmm11, %xmm2
 ; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm5
-; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm23
-; AVX512F-NEXT:    vmovdqa64 160(%rdi), %ymm18
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm2
-; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm18, %ymm2
-; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm4, %xmm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm2, %xmm9
-; AVX512F-NEXT:    vpor %xmm6, %xmm9, %xmm6
-; AVX512F-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm5[2,3],mem[2,3]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm9[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[4,10],zero,zero,zero,ymm9[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[20,26]
-; AVX512F-NEXT:    vinserti128 $1, 96(%rdi), %ymm5, %ymm12
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm5 = ymm12[0,6,12],zero,zero,zero,ymm12[u,u,u,u,u,2,8,14],zero,zero,ymm12[16,22,28],zero,zero,zero,ymm12[u,u,u,u,u,18,24,30],zero,zero
-; AVX512F-NEXT:    vpor %ymm6, %ymm5, %ymm5
-; AVX512F-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm22
-; AVX512F-NEXT:    vmovdqa64 32(%rdi), %ymm20
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm5
-; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm22, %ymm5
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
-; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-NEXT:    vpshufb %xmm10, %xmm6, %xmm10
-; AVX512F-NEXT:    vporq %xmm3, %xmm10, %xmm16
-; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm17
-; AVX512F-NEXT:    vmovdqa64 352(%rdi), %ymm26
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm3
-; AVX512F-NEXT:    vpternlogq $202, %ymm17, %ymm26, %ymm3
-; AVX512F-NEXT:    vextracti128 $1, %ymm3, %xmm10
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm10, %xmm8
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm3, %xmm13
-; AVX512F-NEXT:    vpor %xmm8, %xmm13, %xmm8
-; AVX512F-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
-; AVX512F-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm9[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[5,11],zero,zero,zero,ymm9[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[21,27]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[1,7,13],zero,zero,zero,ymm12[u,u,u,u,u,3,9,15],zero,zero,ymm12[17,23,29],zero,zero,zero,ymm12[u,u,u,u,u,19,25,31],zero,zero
-; AVX512F-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm5, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm13, %xmm6, %xmm2
-; AVX512F-NEXT:    vporq %xmm1, %xmm2, %xmm31
-; AVX512F-NEXT:    vpshufb %xmm0, %xmm10, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
-; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
+; AVX512F-NEXT:    vpshufb %xmm4, %xmm13, %xmm11
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
+; AVX512F-NEXT:    vpshufb %xmm13, %xmm12, %xmm12
+; AVX512F-NEXT:    vpor %xmm11, %xmm12, %xmm2
+; AVX512F-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
+; AVX512F-NEXT:    vpshufb %xmm10, %xmm15, %xmm10
+; AVX512F-NEXT:    vpor %xmm3, %xmm10, %xmm2
+; AVX512F-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX512F-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-NEXT:    vporq %xmm1, %xmm0, %xmm26
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-NEXT:    vmovdqa %ymm2, %ymm4
-; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm19, %ymm4
-; AVX512F-NEXT:    vextracti128 $1, %ymm4, %xmm8
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm8, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm13, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm31, %ymm29, %ymm10
+; AVX512F-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm4, %xmm5
-; AVX512F-NEXT:    vpor %xmm0, %xmm5, %xmm0
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
+; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
 ; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm6
-; AVX512F-NEXT:    vpternlogq $202, %ymm18, %ymm23, %ymm6
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm6, %xmm0
-; AVX512F-NEXT:    vmovdqa64 %xmm5, %xmm24
-; AVX512F-NEXT:    vextracti128 $1, %ymm6, %xmm10
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm10, %xmm13
-; AVX512F-NEXT:    vpor %xmm0, %xmm13, %xmm0
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm27, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm11
+; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm4, %xmm12
+; AVX512F-NEXT:    vpor %xmm11, %xmm12, %xmm2
+; AVX512F-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-NEXT:    vmovdqa %ymm13, %ymm11
+; AVX512F-NEXT:    vpternlogq $202, %ymm28, %ymm30, %ymm11
+; AVX512F-NEXT:    vextracti128 $1, %ymm11, %xmm14
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
+; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm12
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm3
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm3, %xmm8
+; AVX512F-NEXT:    vextracti128 $1, %ymm3, %xmm9
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm9, %xmm7
+; AVX512F-NEXT:    vpor %xmm7, %xmm8, %xmm1
+; AVX512F-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm15, %xmm8
+; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm10, %xmm10
+; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm18
+; AVX512F-NEXT:    vpor %xmm8, %xmm10, %xmm1
+; AVX512F-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,4,10,128,128,128,128,128,128,128,128,0,6,12,128,128,128,20,26,128,128,128,128,128,128,128,128,16,22,28]
-; AVX512F-NEXT:    vpshufb %ymm11, %ymm9, %ymm0
-; AVX512F-NEXT:    vmovdqa %ymm9, %ymm15
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <2,8,14,128,128,u,u,u,u,u,u,4,10,128,128,128,18,24,30,128,128,u,u,u,u,u,u,20,26,128,128,128>
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm12, %ymm13
-; AVX512F-NEXT:    vpor %ymm0, %ymm13, %ymm0
-; AVX512F-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa %ymm2, %ymm0
-; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm20, %ymm0
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
-; AVX512F-NEXT:    vporq %xmm1, %xmm3, %xmm27
-; AVX512F-NEXT:    vmovdqa %ymm7, %ymm14
-; AVX512F-NEXT:    vpternlogq $202, %ymm26, %ymm17, %ymm14
-; AVX512F-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm14, %xmm3
-; AVX512F-NEXT:    vextracti128 $1, %ymm14, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm1, %xmm5
-; AVX512F-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512F-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa 256(%rdi), %ymm3
-; AVX512F-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm3[2,3],mem[2,3]
-; AVX512F-NEXT:    vpshufb %ymm11, %ymm5, %ymm11
-; AVX512F-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm3
-; AVX512F-NEXT:    vpshufb %ymm9, %ymm3, %ymm9
-; AVX512F-NEXT:    vporq %ymm11, %ymm9, %ymm30
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
-; AVX512F-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX512F-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
-; AVX512F-NEXT:    vmovdqa64 %xmm4, %xmm29
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm10, %xmm10
-; AVX512F-NEXT:    vpor %xmm6, %xmm10, %xmm4
-; AVX512F-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29]
-; AVX512F-NEXT:    vpshufb %ymm6, %ymm15, %ymm10
-; AVX512F-NEXT:    vmovdqa64 %ymm15, %ymm28
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = <3,9,15,128,128,u,u,u,u,u,u,5,11,128,128,128,19,25,31,128,128,u,u,u,u,u,u,21,27,128,128,128>
-; AVX512F-NEXT:    vpshufb %ymm4, %ymm12, %ymm15
-; AVX512F-NEXT:    vmovdqa64 %ymm12, %ymm24
-; AVX512F-NEXT:    vpor %ymm10, %ymm15, %ymm10
-; AVX512F-NEXT:    vmovdqu %ymm10, (%rsp) # 32-byte Spill
-; AVX512F-NEXT:    vpshufb %xmm9, %xmm13, %xmm9
-; AVX512F-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-NEXT:    vporq %xmm9, %xmm0, %xmm25
-; AVX512F-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} ymm21 = ymm0[2,3],mem[2,3]
+; AVX512F-NEXT:    vinserti32x4 $1, 288(%rdi), %ymm0, %ymm22
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm15
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm15
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm15[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm17
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} ymm10 = ymm17[2,3],mem[2,3]
+; AVX512F-NEXT:    vinserti32x4 $1, 96(%rdi), %ymm17, %ymm25
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm17
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm4
+; AVX512F-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $248, %ymm2, %ymm5, %ymm0
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm0 # 64-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm17
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm15
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7],ymm0[8,9,10],ymm15[11,12,13,14,15]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
+; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload
+; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm15
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm26
+; AVX512F-NEXT:    vpternlogq $184, %zmm15, %zmm5, %zmm26
+; AVX512F-NEXT:    vmovdqa64 %xmm19, %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512F-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vmovdqa64 %xmm18, %xmm2
+; AVX512F-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
+; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm14
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm3, %xmm0
+; AVX512F-NEXT:    vpshufb %xmm7, %xmm9, %xmm2
+; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-NEXT:    vpshufb %ymm6, %ymm5, %ymm0
-; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm1
-; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm6
-; AVX512F-NEXT:    vmovdqa64 %ymm19, %ymm12
-; AVX512F-NEXT:    vpternlogq $226, %ymm21, %ymm7, %ymm12
-; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm1[0,6,12],zero,zero,zero,xmm1[4,10,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm12[4,10],zero,zero,zero,xmm12[2,8,14],zero,zero,xmm12[u,u,u,u,u,u]
-; AVX512F-NEXT:    vporq %xmm0, %xmm4, %xmm21
-; AVX512F-NEXT:    vmovdqa64 %ymm18, %ymm13
-; AVX512F-NEXT:    vpternlogq $226, %ymm23, %ymm2, %ymm13
-; AVX512F-NEXT:    vextracti128 $1, %ymm13, %xmm4
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm4, %xmm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm9, %xmm13, %xmm10
-; AVX512F-NEXT:    vporq %xmm0, %xmm10, %xmm23
-; AVX512F-NEXT:    vpternlogq $202, %ymm17, %ymm26, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
+; AVX512F-NEXT:    vmovdqa64 %ymm29, %ymm4
+; AVX512F-NEXT:    vpternlogq $226, %ymm31, %ymm16, %ymm4
+; AVX512F-NEXT:    vextracti128 $1, %ymm4, %xmm11
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm11, %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[4,10],zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[u,u,u,u,u,u]
+; AVX512F-NEXT:    vporq %xmm0, %xmm2, %xmm29
+; AVX512F-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-NEXT:    vpternlogq $226, %ymm27, %ymm13, %ymm2
 ; AVX512F-NEXT:    vextracti128 $1, %ymm2, %xmm0
-; AVX512F-NEXT:    vpshufb %xmm8, %xmm0, %xmm8
-; AVX512F-NEXT:    vpshufb %xmm9, %xmm2, %xmm9
-; AVX512F-NEXT:    vpor %xmm8, %xmm9, %xmm8
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <128,128,0,6,12,u,u,u,u,u,128,128,128,2,8,14,128,128,16,22,28,u,u,u,u,u,128,128,128,18,24,30>
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,6,12],zero,zero,zero,ymm3[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,22,28],zero,zero,zero
-; AVX512F-NEXT:    vpshufb %ymm11, %ymm5, %ymm10
-; AVX512F-NEXT:    vmovdqa64 %ymm11, %ymm29
-; AVX512F-NEXT:    vpor %ymm9, %ymm10, %ymm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm10 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm10, %xmm1, %xmm11
-; AVX512F-NEXT:    vmovdqa64 %xmm10, %xmm19
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm12, %xmm12
-; AVX512F-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-NEXT:    vporq %xmm11, %xmm12, %xmm17
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
+; AVX512F-NEXT:    vpshufb %xmm5, %xmm0, %xmm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm7
+; AVX512F-NEXT:    vporq %xmm3, %xmm7, %xmm20
+; AVX512F-NEXT:    vpternlogq $202, %ymm28, %ymm30, %ymm16
+; AVX512F-NEXT:    vmovdqa %ymm1, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm25, %ymm10, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm13
+; AVX512F-NEXT:    vextracti128 $1, %ymm13, %xmm7
+; AVX512F-NEXT:    vpshufb %xmm5, %xmm7, %xmm5
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm13, %xmm6
+; AVX512F-NEXT:    vporq %xmm5, %xmm6, %xmm19
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm5, %xmm11, %xmm6
+; AVX512F-NEXT:    vmovdqa64 %xmm5, %xmm23
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm11 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
+; AVX512F-NEXT:    vpshufb %xmm11, %xmm4, %xmm9
+; AVX512F-NEXT:    vporq %xmm6, %xmm9, %xmm27
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX512F-NEXT:    vpshufb %xmm15, %xmm13, %xmm14
-; AVX512F-NEXT:    vporq %xmm4, %xmm14, %xmm26
-; AVX512F-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29,128,128,128]
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm12 = <128,128,1,7,13,u,u,u,u,u,128,128,128,3,9,15,128,128,17,23,29,u,u,u,u,u,128,128,128,19,25,31>
-; AVX512F-NEXT:    vpshufb %ymm2, %ymm3, %ymm14
-; AVX512F-NEXT:    vpshufb %ymm12, %ymm5, %ymm15
-; AVX512F-NEXT:    vpor %ymm14, %ymm15, %ymm14
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,ymm5[2,8,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,10],zero,zero,zero,ymm5[18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[20,26]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm13 = ymm3[0,6,12],zero,zero,zero,ymm3[u,u,u,u,u,2,8,14],zero,zero,ymm3[16,22,28],zero,zero,zero,ymm3[u,u,u,u,u,18,24,30],zero,zero
-; AVX512F-NEXT:    vpor %ymm15, %ymm13, %ymm13
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 16-byte Folded Reload
-; AVX512F-NEXT:    vpblendw {{.*#+}} ymm15 = ymm13[0,1,2],ymm15[3,4,5,6,7],ymm13[8,9,10],ymm15[11,12,13,14,15]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm20, %ymm7
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm5[3,9,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,11],zero,zero,zero,ymm5[19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[21,27]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[1,7,13],zero,zero,zero,ymm3[u,u,u,u,u,3,9,15],zero,zero,ymm3[17,23,29],zero,zero,zero,ymm3[u,u,u,u,u,19,25,31],zero,zero
-; AVX512F-NEXT:    vpor %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 16-byte Folded Reload
-; AVX512F-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7],ymm3[8,9,10],ymm5[11,12,13,14,15]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-NEXT:    vextracti128 $1, %ymm7, %xmm5
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm5[0,6,12],zero,zero,zero,xmm5[4,10,u,u,u,u,u,u]
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[4,10],zero,zero,zero,xmm7[2,8,14],zero,zero,xmm7[u,u,u,u,u,u]
-; AVX512F-NEXT:    vpor %xmm15, %xmm10, %xmm10
-; AVX512F-NEXT:    vmovdqa64 %ymm24, %ymm4
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[4,10],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,6,12],zero,zero,zero,ymm4[20,26],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,22,28],zero,zero,zero
-; AVX512F-NEXT:    vmovdqa64 %ymm28, %ymm11
-; AVX512F-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512F-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
-; AVX512F-NEXT:    vpor %ymm1, %ymm15, %ymm1
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-NEXT:    vinserti32x4 $1, %xmm23, %ymm0, %ymm10
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm21, %zmm10, %zmm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,4,10,0,6,12,18,24,30,20,26,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm2, %ymm3, %ymm4
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $236, %ymm18, %ymm4, %ymm12
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,5,11,1,7,13,19,25,31,21,27,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpternlogq $236, %ymm18, %ymm3, %ymm14
+; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 16-byte Folded Reload
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm2, %ymm1, %ymm2
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm3, %ymm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa64 %ymm16, %ymm6
+; AVX512F-NEXT:    vextracti32x4 $1, %ymm16, %xmm3
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm3, %xmm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm8 = xmm6[4,10],zero,zero,zero,xmm6[2,8,14],zero,zero,xmm6[u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
+; AVX512F-NEXT:    vpshufb %xmm15, %xmm13, %xmm8
+; AVX512F-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vpternlogq $226, %ymm25, %ymm8, %ymm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = ymm10[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm9[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 16-byte Folded Reload
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm9, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vinserti32x4 $1, %xmm19, %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = ymm8[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $242, %ymm5, %ymm13, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm5
+; AVX512F-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm29, %zmm9, %zmm9
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, %zmm10, %zmm15, %zmm1
-; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $226, %ymm8, %ymm10, %ymm9
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm8
+; AVX512F-NEXT:    vpternlogq $226, %zmm9, %zmm15, %zmm4
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm8
-; AVX512F-NEXT:    vmovdqa64 %xmm19, %xmm1
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; AVX512F-NEXT:    vmovdqa64 %xmm18, %xmm5
-; AVX512F-NEXT:    vpshufb %xmm5, %xmm7, %xmm5
-; AVX512F-NEXT:    vpor %xmm1, %xmm5, %xmm1
-; AVX512F-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
-; AVX512F-NEXT:    vpshufb %ymm12, %ymm11, %ymm5
-; AVX512F-NEXT:    vpor %ymm2, %ymm5, %ymm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm5
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512F-NEXT:    vpternlogq $226, %zmm2, %zmm15, %zmm1
+; AVX512F-NEXT:    vpternlogq $184, %zmm4, %zmm9, %zmm5
+; AVX512F-NEXT:    vmovdqa64 %xmm23, %xmm4
+; AVX512F-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX512F-NEXT:    vpshufb %xmm11, %xmm6, %xmm4
+; AVX512F-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm4 = ymm10[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm7 = ymm8[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $242, %ymm4, %ymm13, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm4
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-NEXT:    vpternlogq $226, %ymm0, %ymm10, %ymm14
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
-; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm0
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm16 # 32-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm31 # 32-byte Folded Reload
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm16
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm31
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm27 # 32-byte Folded Reload
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm27
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $226, (%rsp), %ymm2, %ymm25 # 32-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm25
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $184, %ymm1, %ymm2, %ymm30
-; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX512F-NEXT:    vpternlogq $184, %ymm1, %ymm2, %ymm6
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpternlogq $184, %zmm16, %zmm1, %zmm5
-; AVX512F-NEXT:    vpternlogq $184, %zmm31, %zmm1, %zmm3
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm2
-; AVX512F-NEXT:    vpternlogq $184, %zmm27, %zmm1, %zmm2
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm4
-; AVX512F-NEXT:    vpternlogq $184, %zmm25, %zmm1, %zmm4
-; AVX512F-NEXT:    vmovdqa64 %zmm5, (%rsi)
-; AVX512F-NEXT:    vmovdqa64 %zmm3, (%rdx)
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm0
+; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm15, %zmm3
+; AVX512F-NEXT:    vpternlogq $184, %zmm3, %zmm9, %zmm4
+; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512F-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm12
+; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512F-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm14
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $184, %zmm12, %zmm0, %zmm2
+; AVX512F-NEXT:    vpternlogq $184, %zmm14, %zmm0, %zmm1
+; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm26, (%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rcx)
-; AVX512F-NEXT:    vmovdqa64 %zmm4, (%r8)
-; AVX512F-NEXT:    vmovdqa64 %zmm8, (%r9)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm5, (%r9)
 ; AVX512F-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512F-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-NEXT:    addq $136, %rsp
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4803,260 +4719,234 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
 ; AVX512BW-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm24
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm23
 ; AVX512BW-NEXT:    movw $18724, %r10w # imm = 0x4924
 ; AVX512BW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm24, %ymm10 {%k1}
-; AVX512BW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
-; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm14
-; AVX512BW-NEXT:    vpshufb %xmm6, %xmm14, %xmm3
-; AVX512BW-NEXT:    vpor %xmm1, %xmm3, %xmm4
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %ymm22
-; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm5
-; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm12
-; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512BW-NEXT:    vpblendmw %ymm12, %ymm3, %ymm15 {%k1}
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm2, %xmm9, %xmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
+; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm12
+; AVX512BW-NEXT:    vpshufb %xmm4, %xmm12, %xmm3
+; AVX512BW-NEXT:    vpor %xmm1, %xmm3, %xmm5
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm6
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm26
+; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm1
+; AVX512BW-NEXT:    vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm16, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm16, %xmm11
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm15, %xmm8
-; AVX512BW-NEXT:    vpor %xmm7, %xmm8, %xmm7
-; AVX512BW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm7
-; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm5, %ymm4
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,6,12,128,128,128,128,128,128,128,128,2,8,14,128,128,16,22,28,128,128,128,128,128,128,128,128,18,24,30,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm8, %ymm4, %ymm13
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],mem[2,3]
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [128,128,128,2,8,14,128,128,128,128,128,128,128,128,4,10,128,128,128,18,24,30,128,128,128,128,128,128,128,128,20,26]
-; AVX512BW-NEXT:    vpshufb %ymm19, %ymm5, %ymm20
-; AVX512BW-NEXT:    vporq %ymm13, %ymm20, %ymm13
-; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm20 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm15, %xmm13
+; AVX512BW-NEXT:    vpor %xmm11, %xmm13, %xmm11
+; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm5, %zmm11, %zmm11
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm6, %ymm13
+; AVX512BW-NEXT:    movw $-28124, %r10w # imm = 0x9224
+; AVX512BW-NEXT:    kmovd %r10d, %k4
+; AVX512BW-NEXT:    vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
 ; AVX512BW-NEXT:    vpshufb %xmm2, %xmm20, %xmm2
 ; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
-; AVX512BW-NEXT:    vpshufb %xmm6, %xmm21, %xmm6
-; AVX512BW-NEXT:    vpor %xmm2, %xmm6, %xmm2
+; AVX512BW-NEXT:    vpshufb %xmm4, %xmm21, %xmm4
+; AVX512BW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,0,6,12,2,8,14,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-NEXT:    kmovd %r10d, %k3
-; AVX512BW-NEXT:    vmovdqu8 %ymm13, %ymm2 {%k3}
-; AVX512BW-NEXT:    vmovdqu16 %zmm7, %zmm2 {%k3}
-; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512BW-NEXT:    vinserti128 $1, 288(%rdi), %ymm7, %ymm6
-; AVX512BW-NEXT:    vpshufb %ymm8, %ymm6, %ymm8
-; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],mem[2,3]
-; AVX512BW-NEXT:    vpshufb %ymm19, %ymm7, %ymm13
-; AVX512BW-NEXT:    vpor %ymm8, %ymm13, %ymm9
-; AVX512BW-NEXT:    vmovdqa64 320(%rdi), %ymm28
-; AVX512BW-NEXT:    vmovdqa 352(%rdi), %ymm8
-; AVX512BW-NEXT:    vpblendmw %ymm28, %ymm8, %ymm19 {%k1}
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm19, %xmm23
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm23, %xmm17
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm19, %xmm18
+; AVX512BW-NEXT:    kmovd %r10d, %k2
+; AVX512BW-NEXT:    vpshufb %ymm6, %ymm19, %ymm2 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm11, %zmm2 {%k2}
+; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm11
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm14
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
+; AVX512BW-NEXT:    vpshufb %ymm6, %ymm22, %ymm7
+; AVX512BW-NEXT:    vmovdqa 320(%rdi), %ymm11
+; AVX512BW-NEXT:    vmovdqa 352(%rdi), %ymm6
+; AVX512BW-NEXT:    vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm24, %xmm25
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm25, %xmm17
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm24, %xmm18
 ; AVX512BW-NEXT:    vporq %xmm17, %xmm18, %xmm17
-; AVX512BW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm11
-; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm9[0,1,2],ymm11[3,4,5,6,7],ymm9[8,9,10],ymm11[11,12,13,14,15]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512BW-NEXT:    movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
-; AVX512BW-NEXT:    kmovq %rdi, %k2
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm2 {%k2}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm10, %xmm10
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm14, %xmm14
-; AVX512BW-NEXT:    vpor %xmm10, %xmm14, %xmm10
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
-; AVX512BW-NEXT:    vpshufb %xmm14, %xmm16, %xmm16
+; AVX512BW-NEXT:    kmovq %rdi, %k3
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm2 {%k3}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm12, %xmm12
+; AVX512BW-NEXT:    vpor %xmm8, %xmm12, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm16, %xmm16
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
 ; AVX512BW-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
 ; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [1,7,13,128,128,128,128,128,128,128,128,3,9,15,128,128,17,23,29,128,128,128,128,128,128,128,128,19,25,31,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm16, %ymm4, %ymm10
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [128,128,128,3,9,15,128,128,128,128,128,128,128,128,5,11,128,128,128,19,25,31,128,128,128,128,128,128,128,128,21,27]
-; AVX512BW-NEXT:    vpshufb %ymm18, %ymm5, %ymm25
-; AVX512BW-NEXT:    vporq %ymm10, %ymm25, %ymm25
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm20, %xmm9
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm21, %xmm10
-; AVX512BW-NEXT:    vpor %xmm9, %xmm10, %xmm10
-; AVX512BW-NEXT:    vmovdqu8 %ymm25, %ymm10 {%k3}
-; AVX512BW-NEXT:    vmovdqu16 %zmm15, %zmm10 {%k3}
-; AVX512BW-NEXT:    vpshufb %ymm16, %ymm6, %ymm9
-; AVX512BW-NEXT:    vpshufb %ymm18, %ymm7, %ymm11
-; AVX512BW-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512BW-NEXT:    vpshufb %xmm14, %xmm23, %xmm11
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm19, %xmm14
-; AVX512BW-NEXT:    vpor %xmm11, %xmm14, %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm9[0,1,2],ymm11[3,4,5,6,7],ymm9[8,9,10],ymm11[11,12,13,14,15]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5,6,7]
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm10 {%k2}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = [2,8,14,128,128,128,128,128,128,128,128,4,10,128,128,128,18,24,30,128,128,128,128,128,128,128,128,20,26,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm9, %ymm4, %ymm11
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,4,10,128,128,128,128,128,128,128,128,0,6,12,128,128,128,20,26,128,128,128,128,128,128,128,128,16,22,28]
-; AVX512BW-NEXT:    vpshufb %ymm15, %ymm5, %ymm14
-; AVX512BW-NEXT:    vpor %ymm11, %ymm14, %ymm11
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm15, %zmm8
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm20, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm21, %xmm9
+; AVX512BW-NEXT:    vpor %xmm7, %xmm9, %xmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,1,7,13,3,9,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm19, %ymm9 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm22, %ymm7
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm25, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm24, %xmm12
+; AVX512BW-NEXT:    vpor %xmm8, %xmm12, %xmm8
+; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm9 {%k3}
+; AVX512BW-NEXT:    vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
 ; AVX512BW-NEXT:    movw $9362, %di # imm = 0x2492
-; AVX512BW-NEXT:    kmovd %edi, %k3
-; AVX512BW-NEXT:    vpblendmw %ymm22, %ymm1, %ymm16 {%k3}
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm16, %xmm17
-; AVX512BW-NEXT:    vpshufb %xmm14, %xmm17, %xmm18
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm19, %xmm16, %xmm20
-; AVX512BW-NEXT:    vporq %xmm18, %xmm20, %xmm18
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm8, %xmm16
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm16, %xmm12
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm8, %xmm18
+; AVX512BW-NEXT:    vporq %xmm12, %xmm18, %xmm18
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,2,8,14,4,10,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
-; AVX512BW-NEXT:    kmovd %edi, %k4
-; AVX512BW-NEXT:    vmovdqu8 %ymm11, %ymm18 {%k4}
-; AVX512BW-NEXT:    vpblendmw %ymm24, %ymm0, %ymm11 {%k3}
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm11, %xmm20
-; AVX512BW-NEXT:    vpshufb %xmm14, %xmm20, %xmm14
-; AVX512BW-NEXT:    vpshufb %xmm19, %xmm11, %xmm19
-; AVX512BW-NEXT:    vporq %xmm14, %xmm19, %xmm14
-; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm12, %ymm19 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX512BW-NEXT:    vpshufb %xmm21, %xmm19, %xmm23
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm19, %xmm25
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX512BW-NEXT:    vpshufb %xmm26, %xmm25, %xmm27
-; AVX512BW-NEXT:    vporq %xmm23, %xmm27, %xmm23
-; AVX512BW-NEXT:    vinserti32x4 $1, %xmm23, %ymm0, %ymm23
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm14, %zmm23, %zmm14
+; AVX512BW-NEXT:    kmovd %edi, %k5
+; AVX512BW-NEXT:    vpshufb %ymm19, %ymm15, %ymm18 {%k5}
+; AVX512BW-NEXT:    vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm21, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm20, %xmm12
+; AVX512BW-NEXT:    vpor %xmm7, %xmm12, %xmm7
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm22, %xmm17, %xmm12
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm17, %xmm24
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm24, %xmm27
+; AVX512BW-NEXT:    vporq %xmm12, %xmm27, %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm12
 ; AVX512BW-NEXT:    movl $2097151, %edi # imm = 0x1FFFFF
-; AVX512BW-NEXT:    kmovq %rdi, %k5
-; AVX512BW-NEXT:    vmovdqu8 %zmm18, %zmm14 {%k5}
-; AVX512BW-NEXT:    vpshufb %ymm9, %ymm6, %ymm9
-; AVX512BW-NEXT:    vpshufb %ymm15, %ymm7, %ymm15
-; AVX512BW-NEXT:    vpor %ymm9, %ymm15, %ymm9
-; AVX512BW-NEXT:    vpblendmw %ymm8, %ymm28, %ymm18 {%k1}
-; AVX512BW-NEXT:    vpshufb %xmm21, %xmm18, %xmm15
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm18, %xmm21
-; AVX512BW-NEXT:    vpshufb %xmm26, %xmm21, %xmm23
-; AVX512BW-NEXT:    vporq %xmm15, %xmm23, %xmm15
-; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm15 {%k4}
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm14 {%k2}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm9 = [3,9,15,128,128,128,128,128,128,128,128,5,11,128,128,128,19,25,31,128,128,128,128,128,128,128,128,21,27,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm9, %ymm4, %ymm15
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29]
-; AVX512BW-NEXT:    vpshufb %ymm23, %ymm5, %ymm26
-; AVX512BW-NEXT:    vporq %ymm15, %ymm26, %ymm15
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm26, %xmm17, %xmm17
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm27, %xmm16, %xmm16
-; AVX512BW-NEXT:    vporq %xmm17, %xmm16, %xmm16
-; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm16 {%k4}
-; AVX512BW-NEXT:    vpshufb %xmm26, %xmm20, %xmm15
-; AVX512BW-NEXT:    vpshufb %xmm27, %xmm11, %xmm11
-; AVX512BW-NEXT:    vpor %xmm15, %xmm11, %xmm11
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm19, %xmm15
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
-; AVX512BW-NEXT:    vpshufb %xmm19, %xmm25, %xmm20
-; AVX512BW-NEXT:    vporq %xmm15, %xmm20, %xmm15
-; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm15, %zmm15
-; AVX512BW-NEXT:    vmovdqu8 %zmm16, %zmm15 {%k5}
-; AVX512BW-NEXT:    vpshufb %ymm9, %ymm6, %ymm9
-; AVX512BW-NEXT:    vpshufb %ymm23, %ymm7, %ymm11
-; AVX512BW-NEXT:    vpor %ymm9, %ymm11, %ymm9
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm18, %xmm11
-; AVX512BW-NEXT:    vpshufb %xmm19, %xmm21, %xmm16
-; AVX512BW-NEXT:    vporq %xmm11, %xmm16, %xmm11
-; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm11 {%k4}
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm9
-; AVX512BW-NEXT:    vmovdqu8 %zmm9, %zmm15 {%k2}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vmovdqu16 %ymm24, %ymm0 {%k1}
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm11
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm11, %xmm16
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm0, %xmm18
-; AVX512BW-NEXT:    vporq %xmm16, %xmm18, %xmm16
-; AVX512BW-NEXT:    vmovdqu16 %ymm12, %ymm3 {%k3}
-; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm12
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm12, %xmm19
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm20 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX512BW-NEXT:    vpshufb %xmm20, %xmm3, %xmm21
-; AVX512BW-NEXT:    vporq %xmm19, %xmm21, %xmm19
-; AVX512BW-NEXT:    vinserti32x4 $1, %xmm19, %ymm0, %ymm19
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm16, %zmm19, %zmm16
-; AVX512BW-NEXT:    vmovdqu16 %ymm22, %ymm1 {%k1}
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm1, %xmm19
-; AVX512BW-NEXT:    vpshufb %xmm9, %xmm19, %xmm9
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm1, %xmm17
-; AVX512BW-NEXT:    vporq %xmm9, %xmm17, %xmm9
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [128,128,0,6,12,128,128,128,128,128,128,128,128,2,8,14,128,128,16,22,28,128,128,128,128,128,128,128,128,18,24,30]
-; AVX512BW-NEXT:    vpshufb %ymm17, %ymm5, %ymm21
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [4,10,128,128,128,128,128,128,128,128,0,6,12,128,128,128,20,26,128,128,128,128,128,128,128,128,16,22,28,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm22, %ymm4, %ymm23
-; AVX512BW-NEXT:    vporq %ymm21, %ymm23, %ymm13
-; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm13[5,6,7]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5,6,7]
+; AVX512BW-NEXT:    kmovq %rdi, %k6
+; AVX512BW-NEXT:    vmovdqu8 %zmm18, %zmm12 {%k6}
+; AVX512BW-NEXT:    vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
+; AVX512BW-NEXT:    vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm22, %xmm18, %xmm22
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm18, %xmm27
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm27, %xmm25
+; AVX512BW-NEXT:    vporq %xmm22, %xmm25, %xmm22
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512BW-NEXT:    vpshufb %ymm19, %ymm7, %ymm22 {%k5}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm19
+; AVX512BW-NEXT:    vmovdqu8 %zmm19, %zmm12 {%k3}
+; AVX512BW-NEXT:    movw $9289, %di # imm = 0x2449
+; AVX512BW-NEXT:    kmovd %edi, %k4
+; AVX512BW-NEXT:    vmovdqu16 %ymm14, %ymm4 {%k4}
+; AVX512BW-NEXT:    vmovdqu16 %ymm13, %ymm5 {%k4}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm13, %xmm16, %xmm14
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm8, %xmm8
+; AVX512BW-NEXT:    vpor %xmm14, %xmm8, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,3,9,15,5,11,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm14, %ymm15, %ymm8 {%k5}
+; AVX512BW-NEXT:    vpshufb %xmm13, %xmm21, %xmm13
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm20, %xmm15
+; AVX512BW-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm15, %xmm17, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm24, %xmm19
+; AVX512BW-NEXT:    vporq %xmm16, %xmm19, %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm13, %zmm16, %zmm13
+; AVX512BW-NEXT:    vmovdqu8 %zmm8, %zmm13 {%k6}
+; AVX512BW-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm27, %xmm15
+; AVX512BW-NEXT:    vpor %xmm8, %xmm15, %xmm8
+; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpshufb %ymm14, %ymm7, %ymm8 {%k5}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm13 {%k3}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,4,10,0,6,12,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vmovdqu16 %ymm10, %ymm3 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm15
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm15, %xmm10
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm3, %xmm17
+; AVX512BW-NEXT:    vporq %xmm10, %xmm17, %xmm10
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %ymm23, %ymm0 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm0, %xmm16
+; AVX512BW-NEXT:    vporq %xmm14, %xmm16, %xmm14
+; AVX512BW-NEXT:    vmovdqu16 %ymm26, %ymm1 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm16, %xmm18
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm19, %xmm1, %xmm20
+; AVX512BW-NEXT:    vporq %xmm18, %xmm20, %xmm18
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm14, %zmm18, %zmm14
 ; AVX512BW-NEXT:    movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
 ; AVX512BW-NEXT:    kmovq %rdi, %k1
-; AVX512BW-NEXT:    vmovdqu8 %zmm16, %zmm9 {%k1}
-; AVX512BW-NEXT:    vpshufb %ymm17, %ymm7, %ymm13
-; AVX512BW-NEXT:    vpshufb %ymm22, %ymm6, %ymm16
-; AVX512BW-NEXT:    vporq %ymm13, %ymm16, %ymm13
-; AVX512BW-NEXT:    vmovdqu16 %ymm28, %ymm8 {%k3}
-; AVX512BW-NEXT:    vextracti32x4 $1, %ymm8, %xmm16
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm16, %xmm17
-; AVX512BW-NEXT:    vpshufb %xmm20, %xmm8, %xmm18
-; AVX512BW-NEXT:    vporq %xmm17, %xmm18, %xmm17
-; AVX512BW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
+; AVX512BW-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k1}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
+; AVX512BW-NEXT:    vmovdqu16 %ymm11, %ymm6 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm11, %xmm14
+; AVX512BW-NEXT:    vpshufb %xmm19, %xmm6, %xmm17
+; AVX512BW-NEXT:    vporq %xmm14, %xmm17, %xmm14
+; AVX512BW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
 ; AVX512BW-NEXT:    kmovd %edi, %k2
-; AVX512BW-NEXT:    vmovdqu8 %ymm17, %ymm13 {%k2}
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm13
-; AVX512BW-NEXT:    vmovdqu16 %zmm13, %zmm9 {%k2}
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm13, %xmm11, %xmm11
+; AVX512BW-NEXT:    vmovdqu8 %ymm14, %ymm7 {%k2}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu16 %zmm7, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,5,11,1,7,13,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
 ; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm3, %xmm3
+; AVX512BW-NEXT:    vpor %xmm3, %xmm15, %xmm3
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm8, %xmm5
 ; AVX512BW-NEXT:    vpshufb %xmm17, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpor %xmm0, %xmm11, %xmm0
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm3, %xmm3
-; AVX512BW-NEXT:    vpor %xmm3, %xmm12, %xmm3
-; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512BW-NEXT:    vpshufb %xmm13, %xmm19, %xmm3
-; AVX512BW-NEXT:    vpshufb %xmm17, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,1,7,13,128,128,128,128,128,128,128,128,3,9,15,128,128,17,23,29,128,128,128,128,128,128,128,128,19,25,31]
-; AVX512BW-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm12 = [5,11,128,128,128,128,128,128,128,128,1,7,13,128,128,128,21,27,128,128,128,128,128,128,128,128,17,23,29,128,128,128]
-; AVX512BW-NEXT:    vpshufb %ymm12, %ymm4, %ymm4
-; AVX512BW-NEXT:    vpor %ymm5, %ymm4, %ymm4
-; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm4[5,6,7]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT:    vpshufb %ymm3, %ymm7, %ymm0
-; AVX512BW-NEXT:    vpshufb %ymm12, %ymm6, %ymm3
-; AVX512BW-NEXT:    vpor %ymm0, %ymm3, %ymm0
-; AVX512BW-NEXT:    vpshufb %xmm11, %xmm16, %xmm3
-; AVX512BW-NEXT:    vpshufb %xmm18, %xmm8, %xmm4
-; AVX512BW-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-NEXT:    vmovdqu8 %ymm3, %ymm0 {%k2}
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm16, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm8, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm4, %ymm0
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm11, %xmm1
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k2}
 ; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rcx)
-; AVX512BW-NEXT:    vmovdqa64 %zmm15, (%r8)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%r9)
-; AVX512BW-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %wide.vec = load <384 x i8>, ptr %in.vec, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index e4d7accf3c943..910c59fc09099 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -433,7 +433,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm9
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4],ymm9[5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
 ; AVX2-SLOW-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
 ; AVX2-SLOW-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
 ; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
@@ -476,7 +476,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm6
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm7
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm8 = ymm6[1,1,1,1,5,5,5,5]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4],ymm7[5],ymm8[6,7]
 ; AVX2-FAST-NEXT:    vmovaps {{.*#+}} ymm8 = <u,1,5,u,u,u,u,u>
 ; AVX2-FAST-NEXT:    vpermps %ymm3, %ymm8, %ymm8
 ; AVX2-FAST-NEXT:    vbroadcastf128 {{.*#+}} ymm9 = [5,0,2,6,5,0,2,6]
@@ -542,7 +542,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3,4],ymm8[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm9
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm10 = ymm7[1,1,1,1,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4],ymm9[5],ymm10[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm0[1],zero
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastf128 {{.*#+}} ymm10 = [5,0,2,6,5,0,2,6]
 ; AVX2-FAST-PERLANE-NEXT:    # ymm10 = mem[0,1,0,1]
@@ -784,7 +784,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm10[1,2],ymm9[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps (%rax), %xmm10
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm12
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0],ymm12[1],ymm4[2,3,4],ymm12[5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm12[5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1,2],ymm4[3,4,5],ymm9[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
@@ -806,7 +806,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm8[1,1],ymm9[0,2],ymm8[5,5],ymm9[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 16(%rax), %xmm12
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4],ymm12[5],ymm9[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm12[1],ymm9[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm9[0,1],ymm4[2,3,4,5],ymm9[6,7]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm9 = ymm3[3,3],ymm2[3,3],ymm3[7,7],ymm2[7,7]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3,2,3]
@@ -864,7 +864,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm10 = xmm5[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm10, %ymm10
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4],ymm9[5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps (%rcx), %xmm12
 ; AVX2-SLOW-NEXT:    vmovaps (%rdx), %xmm13
 ; AVX2-SLOW-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
@@ -884,7 +884,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 16(%rax), %xmm9
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4],ymm9[5],ymm11[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm9 = xmm14[3,3],xmm15[3,3]
@@ -969,7 +969,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm9 = xmm5[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm9 = xmm9[0],xmm4[1],xmm9[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm9, %ymm9
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0],ymm6[1],ymm9[2,3,4],ymm6[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm6 = ymm9[0,1,2,3,4],ymm6[5],ymm9[6,7]
 ; AVX2-FAST-NEXT:    vmovaps (%rcx), %xmm11
 ; AVX2-FAST-NEXT:    vmovaps (%rdx), %xmm12
 ; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm9 = zero,xmm12[1],xmm11[1],zero
@@ -989,7 +989,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm7[2],ymm9[3,4,5],ymm7[6],ymm9[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm9 = ymm9[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 16(%rax), %xmm15
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4],ymm15[5],ymm9[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0],ymm15[1],ymm9[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1],ymm6[2,3,4,5],ymm9[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} ymm6 = ymm2[1,1,1,1,5,5,5,5]
@@ -1076,7 +1076,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm10 = xmm5[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm10 = xmm10[0],xmm4[1],xmm10[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm10, %ymm10
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2,3,4],ymm9[5],ymm10[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3,4],ymm9[5],ymm10[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rcx), %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rdx), %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm13[1],xmm12[1],zero
@@ -1096,7 +1096,7 @@ define void @store_i32_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm6[2],ymm11[3,4,5],ymm6[6],ymm11[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm11 = ymm11[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 16(%rax), %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4],ymm9[5],ymm11[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5],ymm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vshufps {{.*#+}} xmm9 = xmm14[3,3],xmm15[3,3]
@@ -1620,7 +1620,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm3 = xmm9[1,1],xmm10[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm4 = xmm11[1,1],xmm4[0,2]
@@ -1660,7 +1660,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovaps (%rax), %xmm2
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
@@ -1700,7 +1700,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm3 = ymm1[1,1],ymm3[0,2],ymm1[5,5],ymm3[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 16(%rax), %xmm5
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3,4,5],ymm3[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps $255, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
@@ -1826,7 +1826,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps (%rcx), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rcx), %xmm11
@@ -1847,7 +1847,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm5 = xmm14[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm5, %ymm5
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm5 = xmm12[1,1,2,2]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2],xmm5[3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm6 = ymm5[0,1,2,1]
@@ -1875,7 +1875,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4,5],ymm0[6],ymm6[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 16(%rax), %xmm14
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm14
@@ -2078,7 +2078,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps %xmm4, %xmm14
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovaps (%rcx), %xmm11
 ; AVX2-FAST-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rcx), %xmm9
@@ -2100,7 +2100,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm8[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm10[1,1,2,2]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm12[2],xmm1[3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -2130,7 +2130,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 16(%rax), %xmm4
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4],ymm4[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm13
@@ -2332,7 +2332,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rcx), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rcx), %xmm11
@@ -2353,7 +2353,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm5 = xmm14[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm5, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm5 = xmm12[1,1,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2],xmm5[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm6 = ymm5[0,1,2,1]
@@ -2381,7 +2381,7 @@ define void @store_i32_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4,5],ymm0[6],ymm6[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm6 = ymm6[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 16(%rax), %xmm14
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4],ymm14[5],ymm6[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0],ymm14[1],ymm6[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm14
@@ -3480,7 +3480,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm3[1,1],xmm4[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm5[1,1],xmm1[0,2]
@@ -3544,7 +3544,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm8[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm8[1,1],xmm1[0,2]
@@ -3607,7 +3607,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm4[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm7[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[1,1],xmm1[0,2]
@@ -3665,7 +3665,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[1,1],xmm4[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm7[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm7[1,1],xmm1[0,2]
@@ -3688,7 +3688,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 16(%rax), %xmm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -3703,7 +3703,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm11[1,1],ymm1[0,2],ymm11[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 48(%rax), %xmm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -3716,7 +3716,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm13[1,1],ymm1[0,2],ymm13[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 80(%rax), %xmm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm14[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
@@ -3978,7 +3978,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm8[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps (%rcx), %xmm10
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rcx), %xmm11
 ; AVX2-SLOW-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -4000,7 +4000,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm9[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm12[1,1,2,2]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2],xmm1[3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -4019,7 +4019,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -4042,7 +4042,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -4078,7 +4078,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -4101,7 +4101,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm1
@@ -4124,7 +4124,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm3
@@ -4500,7 +4500,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovaps (%rcx), %xmm6
 ; AVX2-FAST-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rcx), %xmm3
@@ -4525,7 +4525,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm5[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm8[1,1,2,2]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm7[2],xmm1[3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -4545,7 +4545,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -4572,7 +4572,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -4608,7 +4608,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 16(%rax), %xmm11
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4],ymm11[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -4631,7 +4631,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 48(%rax), %xmm15
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm0
@@ -4654,7 +4654,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 80(%rax), %xmm15
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4],ymm15[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm15[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm3
@@ -5021,7 +5021,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm8[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm14[1],xmm1[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rcx), %xmm10
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rcx), %xmm11
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -5043,7 +5043,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm9[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm12[1,1,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm13[2],xmm1[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -5062,7 +5062,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -5085,7 +5085,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -5121,7 +5121,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -5144,7 +5144,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm1
@@ -5167,7 +5167,7 @@ define void @store_i32_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm3
@@ -7267,7 +7267,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm5[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
@@ -7330,7 +7330,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm5[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
@@ -7394,7 +7394,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm5[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm6[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm6[1,1],xmm1[0,2]
@@ -7457,7 +7457,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm5[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
@@ -7521,7 +7521,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm9[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
@@ -7585,7 +7585,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm9[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm5[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm4[1,1],xmm1[0,2]
@@ -7646,7 +7646,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm0 = xmm3[1,1],xmm8[1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4],ymm2[5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm1 = xmm11[1,1],xmm1[0,2]
@@ -7687,7 +7687,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm13[1,1],ymm1[0,2],ymm13[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7703,7 +7703,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7720,7 +7720,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm14[1,1],ymm1[0,2],ymm14[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7736,7 +7736,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 112(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7753,7 +7753,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 144(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -7767,7 +7767,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm2[1,1],ymm1[0,2],ymm2[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 176(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7783,7 +7783,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vshufps {{.*#+}} ymm1 = ymm3[1,1],ymm1[0,2],ymm3[5,5],ymm1[4,6]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 208(%rax), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 224(%rsi), %xmm1
@@ -8315,7 +8315,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps (%rcx), %xmm9
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rcx), %xmm3
 ; AVX2-SLOW-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8339,7 +8339,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm7[1,1,1,1]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vpermilps {{.*#+}} xmm1 = xmm11[1,1,2,2]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2],xmm1[3]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -8358,7 +8358,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -8383,7 +8383,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -8409,7 +8409,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 128(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 128(%rsi), %xmm1
@@ -8435,7 +8435,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rsi), %xmm1
@@ -8461,7 +8461,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vmovaps 192(%rax), %xmm1
 ; AVX2-SLOW-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %xmm2
 ; AVX2-SLOW-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 192(%rsi), %xmm1
@@ -8497,7 +8497,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -8520,7 +8520,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 64(%rdi), %ymm1
@@ -8543,7 +8543,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 96(%rdi), %ymm1
@@ -8566,7 +8566,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 112(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 128(%rdi), %ymm1
@@ -8589,7 +8589,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 144(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 160(%rdi), %ymm1
@@ -8612,7 +8612,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 176(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 192(%rdi), %ymm0
@@ -8635,7 +8635,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vmovaps 208(%rax), %xmm2
-; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovaps 224(%rcx), %xmm0
@@ -9387,7 +9387,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovaps (%rcx), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rcx), %xmm5
@@ -9415,7 +9415,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm4[1,1,1,1]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpermilps {{.*#+}} xmm1 = xmm7[1,1,2,2]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm6[2],xmm1[3]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -9435,7 +9435,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -9461,7 +9461,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -9487,7 +9487,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 128(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 128(%rsi), %xmm1
@@ -9513,7 +9513,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 160(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 160(%rsi), %xmm1
@@ -9539,7 +9539,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vmovaps 192(%rax), %xmm1
 ; AVX2-FAST-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %xmm2
 ; AVX2-FAST-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 192(%rsi), %xmm1
@@ -9575,7 +9575,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -9598,7 +9598,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 64(%rdi), %ymm0
@@ -9621,7 +9621,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 96(%rdi), %ymm1
@@ -9644,7 +9644,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 112(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 128(%rdi), %ymm1
@@ -9667,7 +9667,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 144(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 160(%rdi), %ymm1
@@ -9690,7 +9690,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 176(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 192(%rdi), %ymm1
@@ -9714,7 +9714,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-NEXT:    vmovaps 208(%rax), %xmm2
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-NEXT:    vmovaps 224(%rdi), %xmm7
@@ -10460,7 +10460,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps (%rcx), %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rcx), %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -10484,7 +10484,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm7[1,1,1,1]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcastsd %xmm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} xmm1 = xmm11[1,1,2,2]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm10[2],xmm1[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
@@ -10503,7 +10503,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rsi), %xmm1
@@ -10528,7 +10528,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rsi), %xmm1
@@ -10554,7 +10554,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rsi), %xmm1
@@ -10580,7 +10580,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rsi), %xmm1
@@ -10606,7 +10606,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rax), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rsi), %xmm1
@@ -10642,7 +10642,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 16(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %ymm1
@@ -10665,7 +10665,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 48(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 64(%rdi), %ymm1
@@ -10688,7 +10688,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 80(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 96(%rdi), %ymm1
@@ -10711,7 +10711,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 112(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 128(%rdi), %ymm1
@@ -10734,7 +10734,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 144(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 160(%rdi), %ymm1
@@ -10757,7 +10757,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 176(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 192(%rdi), %ymm0
@@ -10780,7 +10780,7 @@ define void @store_i32_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5],ymm2[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 208(%rax), %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4],ymm2[5],ymm1[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovaps 224(%rcx), %xmm0

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 9d5f91f47d17d..ca68479d60ddc 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -2437,7 +2437,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[0],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,u,0,u,0,u,0,u,0,u,0,u,0,u,0,u,16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero,ymm2[16],zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
@@ -2731,7 +2731,7 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0],zero,zero,zero,ymm2[0],zero,zero,zero,ymm2[0],zero,zero,zero,ymm2[0],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u,16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero,ymm2[16],zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -3026,7 +3026,7 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.v
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0],zero,zero,zero,zero,zero,zero,zero,ymm2[0],zero,zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u,16],zero,zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -3464,9 +3464,8 @@ define void @vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2(ptr %i
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpbroadcastb %xmm1, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = <255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255>
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -4679,7 +4678,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-SLOW-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-SLOW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-SLOW-NEXT:    vpbroadcastd %xmm0, %xmm0
@@ -4699,7 +4698,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-FAST-PERLANE-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
@@ -4717,7 +4716,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-FAST-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vpbroadcastq %xmm0, %ymm2
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index 96f70a4a1bf7a..74271a48e0bac 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -1918,7 +1918,7 @@ define void @vec384_i8_widen_to_i16_factor2_broadcast_to_v24i16_factor24(ptr %in
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero,xmm0[0],zero
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[0],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,0,u,0,u,0,u,0,u,0,u,0,u,0,u,16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero,ymm1[16],zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm1, %ymm2, %ymm1
@@ -2170,7 +2170,7 @@ define void @vec384_i8_widen_to_i32_factor4_broadcast_to_v12i32_factor12(ptr %in
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero,xmm0[0],zero,zero,zero
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0],zero,zero,zero,ymm1[0],zero,zero,zero,ymm1[0],zero,zero,zero,ymm1[0],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,u,u,0,u,u,u,0,u,u,u,0,u,u,u,16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16],zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
@@ -2422,7 +2422,7 @@ define void @vec384_i8_widen_to_i64_factor8_broadcast_to_v6i64_factor6(ptr %in.e
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0],zero,zero,zero,zero,zero,zero,zero,ymm1[0],zero,zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,u,u,u,u,u,u,u,0,u,u,u,u,u,u,u,16],zero,zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm1[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
@@ -2798,9 +2798,8 @@ define void @vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2(ptr %i
 ; AVX2-LABEL: vec384_i8_widen_to_i192_factor24_broadcast_to_v2i192_factor2:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
-; AVX2-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5]
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpbroadcastb %xmm0, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = <255,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255>
 ; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = mem[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
@@ -3776,7 +3775,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX2-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-SLOW-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
@@ -3793,7 +3792,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpaddb (%rsi), %ymm1, %ymm1
@@ -3810,7 +3809,7 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = mem[0,1,0,1]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
+; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6],ymm2[7]
 ; AVX2-FAST-NEXT:    vpaddb (%rsi), %ymm1, %ymm1

More information about the llvm-commits mailing list